Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Freespace and finalization support in extent, using new metadata scheme #311

Closed
wants to merge 27 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
39891fa
freespace and finalizer support in extent, using new metadata scheme
dsm9000 Oct 12, 2023
2ef55cf
misc. corrections.
dsm9000 Oct 13, 2023
11f7292
misc. corrections.
dsm9000 Oct 13, 2023
7436750
misc. corrections.
dsm9000 Oct 13, 2023
c144e98
proper typing for finalizer
dsm9000 Oct 13, 2023
ad4f509
misc. corrections
dsm9000 Oct 13, 2023
0ba8de6
misc. corrections
dsm9000 Oct 13, 2023
00b07f6
misc. corrections
dsm9000 Oct 13, 2023
ef5bb5e
sync with master and revise slab finalizer set/get logic
dsm9000 Oct 13, 2023
9f6c2b1
cosmetic fix in setFinalizer
dsm9000 Oct 13, 2023
d297aac
cosmetic fix in setFinalizer
dsm9000 Oct 13, 2023
f2bf191
cosmetic fix in setFinalizer
dsm9000 Oct 13, 2023
ba2c68c
rename getTotalSpace to getTotalCapacity
dsm9000 Oct 13, 2023
8e7df00
reccd changes.
dsm9000 Oct 13, 2023
47b25d1
reccd changes.
dsm9000 Oct 13, 2023
be39a89
reccd changes
dsm9000 Oct 14, 2023
01e4993
misc. corrections
dsm9000 Oct 14, 2023
8178abe
tighter contract in setFreeSpace
dsm9000 Oct 14, 2023
6ec50e3
simplify
dsm9000 Oct 14, 2023
02f0061
complete support for finalizers in small allocs.
dsm9000 Oct 14, 2023
5abc328
sync with master
dsm9000 Oct 14, 2023
609fb45
minor correction
dsm9000 Oct 14, 2023
ea6e1c6
abolish hasFinalizer()
dsm9000 Oct 14, 2023
778e1c7
minor correction
dsm9000 Oct 14, 2023
df6956f
move slab metadata logic from extent.d into slab.d, simplify radically
dsm9000 Oct 15, 2023
2d9aee7
move slab metadata logic from extent.d into slab.d, simplify radically
dsm9000 Oct 15, 2023
0ee5aa8
move slab metadata logic from extent.d into slab.d, simplify radically
dsm9000 Oct 15, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 12 additions & 95 deletions sdlib/d/gc/extent.d
Original file line number Diff line number Diff line change
Expand Up @@ -318,43 +318,31 @@ public:
return _metadata.slabData.slabMetaData.slabMetaDataFlags;
}

ushort* freeSpacePtr(uint index) {
assert(isSlab(), "freeSpacePtr accessed on non slab!");
bool hasMetaData(uint index) {
assert(isSlab(), "hasMetaData accessed on non slab!");
assert(sizeClassSupportsMetadata(sizeClass),
"size class not supports slab metadata!");
assert(index < slotCount, "index is out of range!");

return cast(ushort*) (address + (index + 1) * slotSize) - 2;
return slabMetaDataFlags.valueAtAtomic(index);
}

void setFreeSpace(uint index, size_t freeSpace) {
assert(isSlab(), "setFreeSpace accessed on non slab!");
void enableMetaData(uint index) {
assert(isSlab(), "hasMetaData accessed on non slab!");
assert(sizeClassSupportsMetadata(sizeClass),
"size class not supports slab metadata!");
assert(freeSpace <= slotSize, "freeSpace exceeds alloc size!");
assert(index < slotCount, "index is out of range!");

if (freeSpace == 0) {
slabMetaDataFlags.clearBitAtomic(index);
return;
}

// Encode freespace and write it to the last byte (or two bytes) of alloc.
writePackedFreeSpace(freeSpacePtr(index), freeSpace & ushort.max);
slabMetaDataFlags.setBitAtomic(index);
}

size_t getFreeSpace(uint index) {
assert(isSlab(), "getFreeSpace accessed on non slab!");
void disableMetaData(uint index) {
assert(isSlab(), "hasMetaData accessed on non slab!");
assert(sizeClassSupportsMetadata(sizeClass),
"size class not supports slab metadata!");
assert(index < slotCount, "index is out of range!");

if (!slabMetaDataFlags.valueAtAtomic(index)) {
return 0;
}

// Decode freespace, found in the final byte (or two bytes) of the alloc:
return readPackedFreeSpace(freeSpacePtr(index));
slabMetaDataFlags.clearBitAtomic(index);
}

/**
Expand All @@ -365,7 +353,7 @@ public:
}

@property
ulong usedCapacity() {
size_t usedCapacity() {
assert(isLarge(), "usedCapacity accessed on non large!");
return _metadata.largeData.usedCapacity;
}
Expand All @@ -387,11 +375,10 @@ public:
}
}

unittest finalizers {
static void destruct(void* ptr, size_t size) {}

unittest largeData {
// Basic test for large allocs:
import d.gc.tcache;
static void destruct(void* ptr, size_t size) {}
auto large = threadCache.alloc(20000, false);
auto largePd = threadCache.getPageDescriptor(large);
largePd.extent.setUsedCapacity(19999);
Expand Down Expand Up @@ -482,73 +469,3 @@ unittest allocfree {
e.free(1);
assert(e.freeSlots == 512);
}

/**
* Packed Free Space is stored as a 14-bit unsigned integer, in one or two bytes:
*
* /---- byte at ptr ----\ /-- byte at ptr + 1 --\
* B7 B6 B5 B4 B3 B2 B1 B0 A7 A6 A5 A4 A3 A2 A1 A0
* \_______14 bits unsigned integer________/ \ \_ Set if and only if B0..B7 used.
* \_ Set when finalizer is present;
* preserved when writing.
*/

static assert(MaxSmallSize < 0x4000,
"Max small alloc size doesn't fit in 14 bits!");

/// Decode the packed free-space value stored at `ptr` (layout above).
ushort readPackedFreeSpace(ushort* ptr) {
	auto data = loadBigEndian(ptr);
	// Bit 0 flags that all 14 bits are in use; -(data & 1) widens to an
	// all-ones mask in that case, otherwise only the low 6 bits are kept.
	auto mask = 0x3f | -(data & 1);
	return (data >> 2) & mask;
}

/**
 * Encode a 14-bit free-space value at `ptr`, preserving the finalizer
 * bit (bit 1 of the byte at ptr + 1; see the layout above).
 */
void writePackedFreeSpace(ushort* ptr, ushort x) {
	assert(x < 0x4000, "x does not fit in 14 bits!");

	// Values above 6 bits spill into the second byte; bit 0 records this.
	bool isLarge = x > 0x3f;
	ushort native = (x << 2 | isLarge) & ushort.max;
	auto base = nativeToBigEndian(native);

	// The mask selects the bits we may change; bit 1 (the finalizer
	// bit) is excluded (0xfd / 0xfffd) so it is left untouched.
	auto smallMask = nativeToBigEndian!ushort(ushort(0xfd));
	auto largeMask = nativeToBigEndian!ushort(ushort(0xfffd));
	auto mask = isLarge ? largeMask : smallMask;

	// Flip only the masked bits that differ from the desired value.
	auto current = *ptr;
	auto delta = (current ^ base) & mask;
	auto value = current ^ delta;

	*ptr = value & ushort.max;
}

/**
 * Round-trip every 14-bit value through write/read, verifying that the
 * finalizer bit (bit 1 of the final byte) survives in both states.
 */
unittest packedFreeSpace {
	enum FinalizerBit = nativeToBigEndian!ushort(ushort(0x2));

	ubyte[2] a;
	auto p = cast(ushort*) a.ptr;

	foreach (ushort i; 0 .. 0x4000) {
		// With finalizer bit set:
		*p |= FinalizerBit;
		writePackedFreeSpace(p, i);
		assert(readPackedFreeSpace(p) == i);
		assert(*p & FinalizerBit);

		// With finalizer bit cleared:
		*p &= ~FinalizerBit;
		// Should remain same as before:
		assert(readPackedFreeSpace(p) == i);
		writePackedFreeSpace(p, i);
		assert(!(*p & FinalizerBit));
	}

	// Make sure we do not disturb the penultimate byte
	// when the value is small enough.
	foreach (x; 0 .. 256) {
		a[0] = 0xff & x;
		foreach (ubyte y; 0 .. 0x40) {
			writePackedFreeSpace(p, y);
			assert(readPackedFreeSpace(p) == y);
			assert(a[0] == x);
		}
	}
}
233 changes: 233 additions & 0 deletions sdlib/d/gc/slab.d
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
module d.gc.slab;

import d.gc.emap;
import d.gc.extent;
import d.gc.spec;
import d.gc.util;

enum InvalidBinID = 0xff;

Expand All @@ -23,6 +25,237 @@ struct SlabAllocGeometry {
}
}

/**
 * SlabAllocInfo gives access to the metadata of one slab slot:
 * its free space (spare capacity) and an optional finalizer.
 *
 * Metadata is packed at the *end* of the slot:
 *  - The last 2 bytes hold the packed free-space value
 *    (see readPackedFreeSpace / writePackedFreeSpace below).
 *  - The last size_t holds the finalizer pointer in its address bits;
 *    its final byte overlaps the free-space bytes, and bit 1 of that
 *    byte (FinalizerBit) flags that a finalizer is present.
 */
struct SlabAllocInfo {
private:
	// Whether this slot's size class can carry metadata at all.
	bool allowsMetaData = false;
	SlabAllocGeometry sg;
	Extent* e;

	// Mark the slot as carrying metadata in the extent's flag bitmap.
	void enableMetaData() {
		assert(allowsMetaData, "size class not supports slab metadata!");

		if (!hasMetaData) {
			e.enableMetaData(sg.index);
			hasMetaData = true;
		}
	}

	// Clear the slot's metadata flag in the extent's bitmap.
	void disableMetaData() {
		assert(allowsMetaData, "size class not supports slab metadata!");

		e.disableMetaData(sg.index);
		hasMetaData = false;
	}

	/**
	 * Pointer to the last T.sizeof bytes of the slot.
	 *
	 * Fix: the previous code subtracted `T.sizeof` from a T*, which
	 * moves back T.sizeof *elements* (T.sizeof^2 bytes), placing the
	 * free-space and finalizer words at different, non-overlapping
	 * offsets. Subtracting one element makes both words end exactly at
	 * the slot's end, so the finalizer word's final byte is the very
	 * free-space byte whose bit 1 carries FinalizerBit, as the packed
	 * free-space scheme requires.
	 */
	@property
	T* ptrToAllocEnd(T)() {
		return cast(T*) (sg.address + sg.size) - 1;
	}

	alias freeSpacePtr = ptrToAllocEnd!ushort;
	alias finalizerPtr = ptrToAllocEnd!size_t;

	// Spare capacity at the end of the slot (0 when no metadata).
	@property
	size_t freeSpace() {
		if (!hasMetaData) {
			return 0;
		}

		// Decode freespace, found in the final byte (or two bytes) of the alloc:
		return readPackedFreeSpace(freeSpacePtr);
	}

	// Record `size` bytes of free space; a size of 0 drops the metadata.
	void setFreeSpace(size_t size) {
		assert(allowsMetaData, "size class not supports slab metadata!");

		if (size == 0) {
			disableMetaData();
			return;
		}

		writePackedFreeSpace(freeSpacePtr, size & ushort.max);
		enableMetaData();
	}

	// Bit 1 of the slot's final byte, viewed through the finalizer word.
	enum FinalizerBit = nativeToBigEndian!size_t(0x2);

	@property
	bool finalizerEnabled() {
		// A finalizer requires metadata and the finalizer bit to be set.
		return hasMetaData && *finalizerPtr & FinalizerBit;
	}

public:
	bool hasMetaData = false;

	this(PageDescriptor pd, const void* ptr) {
		assert(pd.isSlab(), "Expected a slab!");

		e = pd.extent;
		sg = SlabAllocGeometry(pd, ptr);
		allowsMetaData = sizeClassSupportsMetadata(pd.sizeClass);
		hasMetaData = allowsMetaData && e.hasMetaData(sg.index);
	}

	@property
	auto address() {
		return sg.address;
	}

	// Bytes of the slot currently in use (slot size minus free space).
	@property
	size_t usedCapacity() {
		if (!allowsMetaData) {
			return 0;
		}

		return sg.size - freeSpace;
	}

	// Returns false when the size class cannot track capacity, or when
	// `size` exceeds what the slot can currently hold.
	bool setUsedCapacity(size_t size) {
		if (!allowsMetaData || (size > slotCapacity)) {
			return false;
		}

		setFreeSpace(sg.size - size);
		return true;
	}

	// Usable bytes in the slot; a finalizer reserves PointerSize bytes.
	@property
	size_t slotCapacity() {
		return sg.size - (finalizerEnabled ? PointerSize : 0);
	}

	@property
	Finalizer finalizer() {
		if (!finalizerEnabled) {
			return null;
		}

		// The pointer occupies the address bits of the finalizer word.
		return cast(Finalizer) cast(void*) (*finalizerPtr & AddressMask);
	}

	void setFinalizer(Finalizer newFinalizer) {
		assert(
			hasMetaData,
			"Metadata is not enabled! (must set freespace before finalizer)");

		if (newFinalizer is null) {
			*finalizerPtr &= ~FinalizerBit;
			return;
		}

		auto iFinalizer = cast(size_t) cast(void*) newFinalizer;
		assert((iFinalizer & AddressMask) == iFinalizer,
		       "invalid finalizer pointer!");

		// Preserve the non-address bits (which hold the packed free
		// space), set FinalizerBit, and install the pointer.
		auto newMetaData = (*finalizerPtr & ~AddressMask) | FinalizerBit;
		*finalizerPtr = newMetaData | iFinalizer;
	}
}

/**
 * Exercises finalizer storage on a small (slab) allocation, and checks
 * that free-space writes and finalizer writes do not clobber each other.
 */
unittest finalizers {
	static void destruct_a(void* ptr, size_t size) {}
	static void destruct_b(void* ptr, size_t size) {}

	// Basic test for small allocs:
	import d.gc.tcache;
	auto small = threadCache.allocAppendable(1000, false);
	auto smallPd = threadCache.getPageDescriptor(small);

	import d.gc.slab;
	auto si = SlabAllocInfo(smallPd, small);
	// A 1000-byte request lands in a 1024-byte slot; no finalizer yet.
	assert(si.slotCapacity == 1024);
	assert(si.finalizer is null);

	// Set a finalizer:
	si.setFinalizer(&destruct_a);
	// An installed finalizer reserves 8 bytes of the slot (1024 -> 1016).
	auto slotCapacity = si.slotCapacity;
	assert(slotCapacity == 1016);

	foreach (size_t i; 0 .. slotCapacity + 1) {
		si.setFinalizer(&destruct_a);
		// Confirm that setting freespace does not clobber finalizer:
		si.setUsedCapacity(i);
		assert(cast(void*) si.finalizer == cast(void*) &destruct_a);
		// Confirm that setting finalizer does not clobber freespace:
		si.setFinalizer(&destruct_b);
		assert(si.usedCapacity == i);
		assert(cast(void*) si.finalizer == cast(void*) &destruct_b);
	}

	threadCache.free(small);
}

/**
* Packed Free Space is stored as a 14-bit unsigned integer, in one or two bytes:
*
* /---- byte at ptr ----\ /-- byte at ptr + 1 --\
* B7 B6 B5 B4 B3 B2 B1 B0 A7 A6 A5 A4 A3 A2 A1 A0
* \_______14 bits unsigned integer________/ \ \_ Set if and only if B0..B7 used.
* \_ Set when finalizer is present;
* preserved when writing.
*/

static assert(MaxSmallSize < 0x4000,
"Max small alloc size doesn't fit in 14 bits!");

/**
 * Decode the packed free-space value stored at `ptr` (layout above).
 *
 * Bit 0 of the big-endian load says whether the full 14 bits are in
 * use; when it is clear, only the low 6 bits of the value are valid.
 */
ushort readPackedFreeSpace(ushort* ptr) {
	auto encoded = loadBigEndian(ptr);
	auto value = encoded >> 2;

	if (encoded & 1) {
		// Both bytes in use: all 14 bits of the value are meaningful.
		return value & ushort.max;
	}

	// Single-byte encoding: only the low 6 bits are meaningful.
	return value & 0x3f;
}

/**
 * Encode a 14-bit free-space value at `ptr`, preserving the finalizer
 * bit (bit 1 of the byte at ptr + 1; see the layout above).
 */
void writePackedFreeSpace(ushort* ptr, ushort x) {
	assert(x < 0x4000, "x does not fit in 14 bits!");

	// Values above 6 bits spill into the second byte; bit 0 records this.
	bool isLarge = x > 0x3f;
	ushort encoded = (x << 2 | isLarge) & ushort.max;
	auto desired = nativeToBigEndian(encoded);

	// Bits we are allowed to modify. Bit 1 of the final byte (the
	// finalizer bit) is excluded (0xfd / 0xfffd) so it is preserved,
	// and for small values the penultimate byte is left untouched.
	auto writable = isLarge
		? nativeToBigEndian!ushort(ushort(0xfffd))
		: nativeToBigEndian!ushort(ushort(0xfd));

	// Splice the desired bits into the writable positions only.
	*ptr = ((*ptr & ~writable) | (desired & writable)) & ushort.max;
}

/**
 * Round-trip every 14-bit value through write/read, verifying that the
 * finalizer bit (bit 1 of the final byte) survives in both states.
 */
unittest packedFreeSpace {
	enum FinalizerBit = nativeToBigEndian!ushort(ushort(0x2));

	ubyte[2] a;
	auto p = cast(ushort*) a.ptr;

	foreach (ushort i; 0 .. 0x4000) {
		// With finalizer bit set:
		*p |= FinalizerBit;
		writePackedFreeSpace(p, i);
		assert(readPackedFreeSpace(p) == i);
		assert(*p & FinalizerBit);

		// With finalizer bit cleared:
		*p &= ~FinalizerBit;
		// Should remain same as before:
		assert(readPackedFreeSpace(p) == i);
		writePackedFreeSpace(p, i);
		assert(!(*p & FinalizerBit));
	}

	// Make sure we do not disturb the penultimate byte
	// when the value is small enough.
	foreach (x; 0 .. 256) {
		a[0] = 0xff & x;
		foreach (ubyte y; 0 .. 0x40) {
			writePackedFreeSpace(p, y);
			assert(readPackedFreeSpace(p) == y);
			assert(a[0] == x);
		}
	}
}

struct BinInfo {
ushort itemSize;
ushort slots;
Expand Down
Loading