[GC] Rework the bin's batch allocation algorithm.
deadalnix committed Dec 4, 2024
1 parent 7f9e897 commit 7ce40a5
Showing 1 changed file with 79 additions and 94 deletions.
sdlib/d/gc/bin.d: 173 changes (79 additions, 94 deletions)
@@ -35,62 +35,100 @@ struct Bin {
"Invalid arena or sizeClass!");
assert(slotSize == binInfos[sizeClass].slotSize, "Invalid slot size!");

mutex.lock();
scope(exit) mutex.unlock();
assert(bottom < top, "Invalid stack boundaries!");
assert((top - bottom) < uint.max, "Invalid stack size!");

return (cast(Bin*) &this)
.batchAllocateImpl(filler, emap, sizeClass, top, bottom, slotSize);
}
/**
* When we run out of slab with free space, we allocate a fresh slab.
* However, while we do so, another thread may have returned slabs to
* the bin, so we might end up not using our fresh slab.
*/
Extent* freshSlab = null;

/**
* Allocating fresh slab might fail, for instance if the system
* runs out of memory. Before we attempt to allocate one, we make
* sure we made progress since the last attempt.
*/
bool progressed = true;

/**
* We insert from the bottom up!
*/
auto insert = bottom;

uint batchFree(const(void*)[] worklist, PageDescriptor* pds,
Extent** dallocSlabs, ref uint ndalloc) shared {
mutex.lock();
scope(exit) mutex.unlock();
Refill: {
mutex.lock();
scope(exit) mutex.unlock();

return (cast(Bin*) &this)
.batchFreeImpl(worklist, pds, dallocSlabs, ndalloc);
}
auto slabs = &(cast(Bin*) &this).slabs;

private:
void** batchAllocateImpl(
shared(PageFiller)* filler,
ref CachedExtentMap emap,
ubyte sizeClass,
void** top,
void** bottom,
size_t slotSize,
) {
// FIXME: in contract.
assert(mutex.isHeld(), "Mutex not held!");
assert(bottom < top, "Invalid stack boundaries!");
assert((top - bottom) < uint.max, "Invalid stack size!");
while (insert !is top) {
assert(insert < top, "Insert out of bounds!");

auto insert = bottom;
while (insert !is top) {
assert(insert < top, "Insert out of bounds!");
auto e = slabs.top;
if (unlikely(e is null)) {
if (freshSlab is null) {
// Let's go fetch a new fresh slab.
goto Refresh;
}

auto e = getSlab(filler, emap, sizeClass);
if (unlikely(e is null)) {
break;
}
// We have a fresh slab, use it!
slabs.insert(freshSlab);
freshSlab = null;
continue;
}

assert(e.nfree > 0);
uint nfill = (top - insert) & uint.max;
insert = e.batchAllocate(insert, nfill, slotSize);
assert(bottom <= insert && insert <= top);

assert(e.nfree > 0);
uint nfill = (top - insert) & uint.max;
insert = e.batchAllocate(insert, nfill, slotSize);
assert(bottom <= insert && insert <= top);
progressed = true;

// If the slab is not full, we are done.
if (e.nfree > 0) {
break;
// If the slab is not full, we are done.
if (e.nfree > 0) {
goto Exit;
}

// The slab is full, remove from the heap.
slabs.remove(e);
}
}

// The slab is full, remove from the heap.
slabs.remove(e);
Exit:
if (freshSlab !is null) {
filler.freeExtent(emap, freshSlab);
}

return insert;

Refresh:
assert(insert !is top);
assert(freshSlab is null);

if (!progressed) {
goto Exit;
}

freshSlab = filler.allocSlab(emap, sizeClass);
auto nslots = binInfos[sizeClass].nslots;
assert(freshSlab is null || freshSlab.nfree == nslots);

progressed = false;
goto Refill;
}

uint batchFree(const(void*)[] worklist, PageDescriptor* pds,
Extent** dallocSlabs, ref uint ndalloc) shared {
mutex.lock();
scope(exit) mutex.unlock();

return (cast(Bin*) &this)
.batchFreeImpl(worklist, pds, dallocSlabs, ndalloc);
}

private:
uint batchFreeImpl(const(void*)[] worklist, PageDescriptor* pds,
Extent** dallocSlabs, ref uint ndalloc) {
// FIXME: in contract.
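
The reworked control flow is easiest to see stripped of its concurrency machinery. Below is a simplified, single-threaded sketch in D: the Slab and Bin stand-ins, the slot counts, and the driver in main are invented for illustration, while the real code works on Extent objects in a mutex-guarded heap and returns an unused fresh slab to the PageFiller at Exit.

    import std.stdio : writefln;

    // Stand-in for an extent backing a slab: a bucket of free slots.
    struct Slab {
        uint nfree;
    }

    // Stand-in bin: non-full slabs, plus a budget simulating allocation failure.
    struct Bin {
        Slab*[] slabs;
        uint slabBudget = 2; // the "filler" grants at most two fresh slabs.

        Slab* top() {
            return slabs.length ? slabs[$ - 1] : null;
        }

        Slab* allocSlab(uint nslots) {
            if (slabBudget == 0) {
                return null; // simulate an out of memory condition.
            }

            slabBudget--;
            return new Slab(nslots);
        }
    }

    uint batchAllocate(ref Bin bin, uint wanted, uint nslots) {
        Slab* freshSlab = null; // fresh slab carried across refill passes.
        bool progressed = true; // only retry allocation after progress.
        uint filled = 0;

    Refill:
        while (filled < wanted) {
            auto e = bin.top();
            if (e is null) {
                if (freshSlab is null) {
                    goto Refresh; // go fetch a fresh slab.
                }

                // Use the fresh slab we prepared earlier.
                bin.slabs ~= freshSlab;
                freshSlab = null;
                continue;
            }

            // Take as many slots as possible from the top slab.
            auto take = e.nfree < wanted - filled ? e.nfree : wanted - filled;
            e.nfree -= take;
            filled += take;
            progressed = true;

            if (e.nfree > 0) {
                goto Exit; // the slab is not exhausted, we are done.
            }

            bin.slabs = bin.slabs[0 .. $ - 1]; // full slab leaves the bin.
        }

    Exit:
        // The real code returns an unused freshSlab to the filler here.
        return filled;

    Refresh:
        if (!progressed) {
            goto Exit; // no progress since the last attempt, give up.
        }

        freshSlab = bin.allocSlab(nslots); // may be null on failure.
        progressed = false;
        goto Refill;
    }

    void main() {
        Bin bin;
        // Two fresh slabs of 32 slots cannot cover 100 requested slots,
        // so the loop stops once allocation fails without further progress.
        writefln("filled %s of 100 slots", batchAllocate(bin, 100, 32));
    }

Note the two properties the sketch preserves from the commit: slabs already in the bin are consumed before the fresh slab is inserted, and a failed allocation only ends the loop once no progress has been made since the previous attempt, so the algorithm cannot spin forever on an out-of-memory condition.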
@@ -145,59 +183,6 @@ private:
         return ndeferred;
     }
 
-    auto getSlab(shared(PageFiller)* filler, ref CachedExtentMap emap,
-                 ubyte sizeClass) {
-        // FIXME: in contract.
-        assert(mutex.isHeld(), "Mutex not held!");
-
-        auto slab = slabs.top;
-        if (slab !is null) {
-            return slab;
-        }
-
-        {
-            // Release the lock while we allocate a slab.
-            mutex.unlock();
-            scope(exit) mutex.lock();
-
-            // We don't have a suitable slab, so allocate one.
-            slab = filler.allocSlab(emap, sizeClass);
-        }
-
-        if (unlikely(slab is null)) {
-            // Another thread might have been successful
-            // while we did not hold the lock.
-            return slabs.top;
-        }
-
-        // We may have allocated the slab we need when the lock was released.
-        if (likely(slabs.top is null)) {
-            slabs.insert(slab);
-            return slab;
-        }
-
-        // We are about to release the freshly allocated slab.
-        // We do not want another thread stealing the slab we intend
-        // to use from under our feet, so we keep it around.
-        auto current = slabs.pop();
-
-        assert(slab !is current);
-        assert(slab.nfree == binInfos[sizeClass].nslots);
-
-        {
-            // Release the lock while we release the slab.
-            mutex.unlock();
-            scope(exit) mutex.lock();
-
-            filler.freeExtent(emap, slab);
-        }
-
-        // Now we put it back, which ensures we have at least one
-        // slab available that we can return.
-        slabs.insert(current);
-        return slabs.top;
-    }
 
     /**
      * GC facilities.
      */
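
The deleted getSlab helper embodied a classic locking discipline: release the lock around a slow allocation, then recheck the shared state once the lock is reacquired, because another thread may have supplied a slab in the meantime. The rework keeps the same idea but hoists it out of the per-slab path: the fresh slab is carried across Refill passes and the lock is only taken once per pass, instead of being dropped and reacquired inside the loop. A minimal sketch of the old pattern, using an invented Pool type and core.sync.mutex rather than the repository's own mutex:

    import core.sync.mutex : Mutex;

    // Invented stand-in for the bin: a stock of items guarded by a mutex.
    final class Pool {
        private Mutex mutex;
        private int*[] items;

        this() {
            mutex = new Mutex();
        }

        // Mirrors the deleted getSlab's locking discipline.
        int* get() {
            mutex.lock();
            scope(exit) mutex.unlock();

            if (items.length > 0) {
                return items[$ - 1];
            }

            int* fresh;
            {
                // Release the lock while we allocate.
                mutex.unlock();
                scope(exit) mutex.lock();

                fresh = new int;
            }

            // Recheck: another thread may have restocked the pool while
            // we did not hold the lock. Prefer the shared stock and let
            // the GC reclaim our now-redundant fresh item.
            if (items.length > 0) {
                return items[$ - 1];
            }

            items ~= fresh;
            return fresh;
        }
    }

The sketch simplifies one point: the real getSlab could not just drop a redundant slab, it had to hand the extent back to the PageFiller, again without holding the lock, while keeping one slab popped aside so the bin could not be emptied under it in the meantime. That extra dance is exactly what the new freshSlab bookkeeping in batchAllocate eliminates.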