Skip to content

Commit 7940fee

Browse files
nwf-msrnwf
authored and committed
Refactor MetaEntry remote_and_sizeclass
Introduce a class that we can use to more completely separate the frontend encoding details from the backend.
1 parent 772e46f commit 7940fee

8 files changed

Lines changed: 124 additions & 109 deletions

File tree

src/backend/backend.h

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,14 @@ namespace snmalloc
5959
* Set template parameter to true if it not an error
6060
* to access a location that is not backed by a chunk.
6161
*/
62-
template<bool potentially_out_of_range = false>
63-
SNMALLOC_FAST_PATH static const MetaEntry& get_metaentry(address_t p)
62+
template<typename Ret = MetaEntry, bool potentially_out_of_range = false>
63+
SNMALLOC_FAST_PATH static const Ret& get_metaentry(address_t p)
6464
{
65-
return concretePagemap.template get<potentially_out_of_range>(p);
65+
static_assert(
66+
std::is_base_of_v<MetaEntry, Ret> && sizeof(MetaEntry) == sizeof(Ret),
67+
"Backend Pagemap get_metaentry return must look like MetaEntry");
68+
return static_cast<const Ret&>(
69+
concretePagemap.template get<potentially_out_of_range>(p));
6670
}
6771

6872
/**
@@ -250,15 +254,15 @@ namespace snmalloc
250254
* (remote, sizeclass, metaslab)
251255
* where metaslab, is the second element of the pair return.
252256
*/
253-
static std::pair<capptr::Chunk<void>, Metaslab*> alloc_chunk(
254-
LocalState& local_state,
255-
size_t size,
256-
RemoteAllocator* remote,
257-
sizeclass_t sizeclass)
257+
static std::pair<capptr::Chunk<void>, Metaslab*>
258+
alloc_chunk(LocalState& local_state, size_t size, uintptr_t ras)
258259
{
259260
SNMALLOC_ASSERT(bits::is_pow2(size));
260261
SNMALLOC_ASSERT(size >= MIN_CHUNK_SIZE);
261262

263+
SNMALLOC_ASSERT((ras & MetaEntry::REMOTE_BACKEND_MARKER) == 0);
264+
ras &= ~MetaEntry::REMOTE_BACKEND_MARKER;
265+
262266
auto meta_cap =
263267
local_state.get_meta_range()->alloc_range(PAGEMAP_METADATA_STRUCT_SIZE);
264268

@@ -289,7 +293,7 @@ namespace snmalloc
289293

290294
meta->meta_common.chunk = p;
291295

292-
MetaEntry t(meta, remote, sizeclass);
296+
MetaEntry t(&meta->meta_common, ras);
293297
Pagemap::set_metaentry(address_cast(p), size, t);
294298

295299
p = Aal::capptr_bound<void, capptr::bounds::Chunk>(p, size);

src/backend/backend_concept.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@ namespace snmalloc
2424
{
2525
{ Meta::set_metaentry(addr, sz, t) } -> ConceptSame<void>;
2626

27-
{ Meta::template get_metaentry<true>(addr) }
27+
{ Meta::template get_metaentry<MetaEntry, true>(addr) }
2828
-> ConceptSame<const MetaEntry&>;
2929

30-
{ Meta::template get_metaentry<false>(addr) }
30+
{ Meta::template get_metaentry<MetaEntry, false>(addr) }
3131
-> ConceptSame<const MetaEntry&>;
3232
};
3333

src/backend/chunkallocator.h

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -183,10 +183,9 @@ namespace snmalloc
183183
static std::pair<capptr::Chunk<void>, Metaslab*> alloc_chunk(
184184
typename SharedStateHandle::LocalState& local_state,
185185
ChunkAllocatorLocalState& chunk_alloc_local_state,
186-
sizeclass_t sizeclass,
187186
chunksizeclass_t slab_sizeclass,
188187
size_t slab_size,
189-
RemoteAllocator* remote)
188+
uintptr_t ras)
190189
{
191190
using PAL = typename SharedStateHandle::Pal;
192191
ChunkAllocatorState& state =
@@ -234,16 +233,16 @@ namespace snmalloc
234233
<< " memory in stacks " << state.memory_in_stacks
235234
<< std::endl;
236235
#endif
237-
MetaEntry entry{meta, remote, sizeclass};
236+
MetaEntry entry{&meta->meta_common, ras};
238237
SharedStateHandle::Pagemap::set_metaentry(
239238
address_cast(slab), slab_size, entry);
240239
return {slab, meta};
241240
}
242241

243242
// Allocate a fresh slab as there are no available ones.
244243
// First create meta-data
245-
auto [slab, meta] = SharedStateHandle::alloc_chunk(
246-
&local_state, slab_size, remote, sizeclass);
244+
auto [slab, meta] =
245+
SharedStateHandle::alloc_chunk(&local_state, slab_size, ras);
247246
#ifdef SNMALLOC_TRACING
248247
std::cout << "Create slab:" << slab.unsafe_ptr() << " slab_sizeclass "
249248
<< slab_sizeclass << " size " << slab_size << std::endl;

src/backend/metatypes.h

Lines changed: 14 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -79,10 +79,6 @@ namespace snmalloc
7979
#endif
8080
// clang-format on
8181

82-
struct RemoteAllocator;
83-
class Metaslab;
84-
class sizeclass_t;
85-
8682
/**
8783
* Entry stored in the pagemap. See docs/AddressSpace.md for the full
8884
* MetaEntry lifecycle.
@@ -140,41 +136,23 @@ namespace snmalloc
140136
* `get_remote_and_sizeclass`.
141137
*/
142138
SNMALLOC_FAST_PATH
143-
MetaEntry(Metaslab* meta, uintptr_t remote_and_sizeclass)
144-
: meta(unsafe_to_uintptr<Metaslab>(meta)),
139+
MetaEntry(MetaCommon* meta, uintptr_t remote_and_sizeclass)
140+
: meta(unsafe_to_uintptr<MetaCommon>(meta)),
145141
remote_and_sizeclass(remote_and_sizeclass)
146142
{}
147143

148-
/* See mem/metaslab.h */
149-
SNMALLOC_FAST_PATH
150-
MetaEntry(Metaslab* meta, RemoteAllocator* remote, sizeclass_t sizeclass);
151-
152-
/**
153-
* Return the Metaslab metadata associated with this chunk, guarded by an
154-
* assert that this chunk is being used as a slab (i.e., has an associated
155-
* owning allocator).
156-
*/
157-
[[nodiscard]] SNMALLOC_FAST_PATH Metaslab* get_metaslab() const
158-
{
159-
SNMALLOC_ASSERT(get_remote() != nullptr);
160-
return unsafe_from_uintptr<Metaslab>(meta & ~META_BOUNDARY_BIT);
161-
}
162-
163144
/**
164145
* Return the remote and sizeclass in an implementation-defined encoding.
165146
* This is not guaranteed to be stable across snmalloc releases and so the
166147
* only safe use for this is to pass it to the two-argument constructor of
167148
* this class.
168149
*/
169-
[[nodiscard]] SNMALLOC_FAST_PATH uintptr_t get_remote_and_sizeclass() const
150+
[[nodiscard]] SNMALLOC_FAST_PATH const uintptr_t&
151+
get_remote_and_sizeclass() const
170152
{
171153
return remote_and_sizeclass;
172154
}
173155

174-
/* See mem/metaslab.h */
175-
[[nodiscard]] SNMALLOC_FAST_PATH RemoteAllocator* get_remote() const;
176-
[[nodiscard]] SNMALLOC_FAST_PATH sizeclass_t get_sizeclass() const;
177-
178156
MetaEntry(const MetaEntry&) = delete;
179157

180158
MetaEntry& operator=(const MetaEntry& other)
@@ -186,6 +164,16 @@ namespace snmalloc
186164
return *this;
187165
}
188166

167+
/**
168+
* Return the Metaslab metadata associated with this chunk, guarded by an
169+
* assert that this chunk is being used as a slab (i.e., has an associated
170+
* owning allocator).
171+
*/
172+
[[nodiscard]] SNMALLOC_FAST_PATH MetaCommon* get_meta() const
173+
{
174+
return reinterpret_cast<MetaCommon*>(meta & ~META_BOUNDARY_BIT);
175+
}
176+
189177
void set_boundary()
190178
{
191179
meta |= META_BOUNDARY_BIT;
@@ -201,5 +189,4 @@ namespace snmalloc
201189
return meta &= ~META_BOUNDARY_BIT;
202190
}
203191
};
204-
205192
} // namespace snmalloc

src/mem/corealloc.h

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -403,7 +403,8 @@ namespace snmalloc
403403
* by this thread, or handling the final deallocation onto a slab,
404404
* so it can be reused by other threads.
405405
*/
406-
SNMALLOC_SLOW_PATH void dealloc_local_object_slow(const MetaEntry& entry)
406+
SNMALLOC_SLOW_PATH void
407+
dealloc_local_object_slow(const MetaslabMetaEntry& entry)
407408
{
408409
// TODO: Handle message queue on this path?
409410

@@ -486,19 +487,20 @@ namespace snmalloc
486487
[local_state](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
487488
return capptr_domesticate<SharedStateHandle>(local_state, p);
488489
};
489-
auto cb = [this, &need_post](freelist::HeadPtr msg)
490-
SNMALLOC_FAST_PATH_LAMBDA {
490+
auto cb = [this,
491+
&need_post](freelist::HeadPtr msg) SNMALLOC_FAST_PATH_LAMBDA {
491492
#ifdef SNMALLOC_TRACING
492-
std::cout << "Handling remote" << std::endl;
493+
std::cout << "Handling remote" << std::endl;
493494
#endif
494495

495-
auto& entry = SharedStateHandle::Pagemap::get_metaentry(
496-
snmalloc::address_cast(msg));
496+
auto& entry =
497+
SharedStateHandle::Pagemap::template get_metaentry<MetaslabMetaEntry>(
498+
snmalloc::address_cast(msg));
497499

498-
handle_dealloc_remote(entry, msg.as_void(), need_post);
500+
handle_dealloc_remote(entry, msg.as_void(), need_post);
499501

500-
return true;
501-
};
502+
return true;
503+
};
502504

503505
if constexpr (SharedStateHandle::Options.QueueHeadsAreTame)
504506
{
@@ -532,7 +534,7 @@ namespace snmalloc
532534
* need_post will be set to true, if capacity is exceeded.
533535
*/
534536
void handle_dealloc_remote(
535-
const MetaEntry& entry,
537+
const MetaslabMetaEntry& entry,
536538
CapPtr<void, capptr::bounds::Alloc> p,
537539
bool& need_post)
538540
{
@@ -672,16 +674,18 @@ namespace snmalloc
672674
SNMALLOC_FAST_PATH void
673675
dealloc_local_object(CapPtr<void, capptr::bounds::Alloc> p)
674676
{
675-
const MetaEntry& entry =
676-
SharedStateHandle::Pagemap::get_metaentry(snmalloc::address_cast(p));
677+
// MetaEntry-s seen here are expected to have meaningful Remote pointers
678+
auto& entry =
679+
SharedStateHandle::Pagemap::template get_metaentry<MetaslabMetaEntry>(
680+
snmalloc::address_cast(p));
677681
if (SNMALLOC_LIKELY(dealloc_local_object_fast(entry, p, entropy)))
678682
return;
679683

680684
dealloc_local_object_slow(entry);
681685
}
682686

683687
SNMALLOC_FAST_PATH static bool dealloc_local_object_fast(
684-
const MetaEntry& entry,
688+
const MetaslabMetaEntry& entry,
685689
CapPtr<void, capptr::bounds::Alloc> p,
686690
LocalEntropy& entropy)
687691
{
@@ -786,8 +790,8 @@ namespace snmalloc
786790
auto [slab, meta] = SharedStateHandle::alloc_chunk(
787791
get_backend_local_state(),
788792
slab_size,
789-
public_state(),
790-
sizeclass_t::from_small_class(sizeclass));
793+
MetaslabMetaEntry::encode(
794+
public_state(), sizeclass_t::from_small_class(sizeclass)));
791795

792796
if (slab == nullptr)
793797
{
@@ -840,8 +844,9 @@ namespace snmalloc
840844
{
841845
bool need_post = true; // Always going to post, so ignore.
842846
auto n_tame = p_tame->atomic_read_next(key_global, domesticate);
843-
auto& entry = SharedStateHandle::Pagemap::get_metaentry(
844-
snmalloc::address_cast(p_tame));
847+
const MetaslabMetaEntry& entry =
848+
SharedStateHandle::Pagemap::template get_metaentry<
849+
MetaslabMetaEntry>(snmalloc::address_cast(p_tame));
845850
handle_dealloc_remote(entry, p_tame.as_void(), need_post);
846851
p_tame = n_tame;
847852
}

src/mem/localalloc.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -183,8 +183,8 @@ namespace snmalloc
183183
auto [chunk, meta] = SharedStateHandle::alloc_chunk(
184184
core_alloc->get_backend_local_state(),
185185
large_size_to_chunk_size(size),
186-
core_alloc->public_state(),
187-
size_to_sizeclass_full(size));
186+
MetaslabMetaEntry::encode(
187+
core_alloc->public_state(), size_to_sizeclass_full(size)));
188188
// set up meta data so sizeclass is correct, and hence alloc size, and
189189
// external pointer.
190190
#ifdef SNMALLOC_TRACING
@@ -266,8 +266,9 @@ namespace snmalloc
266266
std::cout << "Remote dealloc post" << p.unsafe_ptr() << " size "
267267
<< alloc_size(p.unsafe_ptr()) << std::endl;
268268
#endif
269-
const MetaEntry& entry =
270-
SharedStateHandle::Pagemap::get_metaentry(address_cast(p));
269+
const MetaslabMetaEntry& entry =
270+
SharedStateHandle::Pagemap::template get_metaentry<MetaslabMetaEntry>(
271+
address_cast(p));
271272
local_cache.remote_dealloc_cache.template dealloc<sizeof(CoreAlloc)>(
272273
entry.get_remote()->trunc_id(), p, key_global);
273274
post_remote_cache();
@@ -624,8 +625,9 @@ namespace snmalloc
624625
capptr::Alloc<void> p_tame = capptr_domesticate<SharedStateHandle>(
625626
core_alloc->backend_state_ptr(), p_wild);
626627

627-
const MetaEntry& entry =
628-
SharedStateHandle::Pagemap::get_metaentry(address_cast(p_tame));
628+
const MetaslabMetaEntry& entry =
629+
SharedStateHandle::Pagemap::template get_metaentry<MetaslabMetaEntry>(
630+
address_cast(p_tame));
629631
if (SNMALLOC_LIKELY(local_cache.remote_allocator == entry.get_remote()))
630632
{
631633
# if defined(__CHERI_PURE_CAPABILITY__) && defined(SNMALLOC_CHECK_CLIENT)
@@ -714,8 +716,9 @@ namespace snmalloc
714716
// To handle this case we require the uninitialised pagemap contain an
715717
// entry for the first chunk of memory, that states it represents a
716718
// large object, so we can pull the check for null off the fast path.
717-
const MetaEntry& entry =
718-
SharedStateHandle::Pagemap::get_metaentry(address_cast(p_raw));
719+
const MetaslabMetaEntry& entry =
720+
SharedStateHandle::Pagemap::template get_metaentry<MetaslabMetaEntry>(
721+
address_cast(p_raw));
719722

720723
return sizeclass_full_to_size(entry.get_sizeclass());
721724
#endif
@@ -759,9 +762,8 @@ namespace snmalloc
759762
size_t remaining_bytes(const void* p)
760763
{
761764
#ifndef SNMALLOC_PASS_THROUGH
762-
const MetaEntry& entry =
763-
SharedStateHandle::Pagemap::template get_metaentry<true>(
764-
address_cast(p));
765+
const MetaslabMetaEntry& entry = SharedStateHandle::Pagemap::
766+
template get_metaentry<MetaslabMetaEntry, true>(address_cast(p));
765767

766768
auto sizeclass = entry.get_sizeclass();
767769
return snmalloc::remaining_bytes(sizeclass, address_cast(p));
@@ -788,9 +790,8 @@ namespace snmalloc
788790
size_t index_in_object(const void* p)
789791
{
790792
#ifndef SNMALLOC_PASS_THROUGH
791-
const MetaEntry& entry =
792-
SharedStateHandle::Pagemap::template get_metaentry<true>(
793-
address_cast(p));
793+
const MetaslabMetaEntry& entry = SharedStateHandle::Pagemap::
794+
template get_metaentry<MetaslabMetaEntry, true>(address_cast(p));
794795

795796
auto sizeclass = entry.get_sizeclass();
796797
return snmalloc::index_in_object(sizeclass, address_cast(p));

0 commit comments

Comments
 (0)