/*
* We divide partially-filled superblocks into four fullness classes.
 * Generally, fullness class N represents blocks where the percentage of
- * free objects is >= (N * 25%) and < (N * 25%). As an exception, however,
- * the superblock from which we're allocating is always in fullness class 0;
- * we only move it to the appropriate class once it is completely filled.
+ * allocated objects is > (N * 25%) and <= ((N + 1) * 25%).  However, we
+ * only ever allocate from superblocks in fullness class 1, so the active
+ * superblock will always be in that class regardless of how full it is.
+ * Moreover, we're lazy about moving superblocks between lists, so there's
+ * no guarantee that the actual degree of fullness of a given superblock
+ * matches the list it's currently on.
*/
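+/*
+ * For example (assuming a size class that yields 64 chunks per superblock),
+ * the classification computed in mspan_adjust_superblock_fullness() puts
+ * nused = 1..16 in class 0, 17..32 in class 1, 33..48 in class 2, and
+ * 49..64 in class 3; nused == 0 is also treated as class 0.
+ */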
#define MSPAN_NUMBER_OF_FULLNESS_CLASSES 4
#define MSPAN_SMALL_ALLOCATION_LISTS \
};
/* Helper functions. */
+static int mspan_adjust_superblock_fullness(char *base, mspan_context *cxt,
+ mspan *superblock,
+ int current_fullness_class);
static mspan_context *mspan_allocate_context_descriptor(char *base,
mspan_manager *mgr);
static void mspan_destroy_span(char *base, mspan *span);
{
char *base = (seg != NULL ? dsm_segment_address(seg) : NULL);
uint16 size_class;
+ int aidx;
/* If it's bigger than the largest size class, allocate whole pages. */
if (size > mspan_size_classes[lengthof(mspan_size_classes) - 1])
Assert(size <= mspan_size_classes[size_class]);
/*
- * XXX Search existing superblocks for a chunk of memory.
+ * Allocate from a superblock for the appropriate size class.
+ *
+ * We always allocate from fullness class 1. Whatever superblock is
+ * at the head of that list becomes our victim for allocation until it's
+ * completely full, at which point we'll move it to its proper fullness
+ * class and allocate from the next block on the list. If there isn't one,
+ * we'll call mspan_ensure_active_superblock to find or create a suitable
+ * block.
+ *
+ * You might wonder why we allocate from fullness class 1 rather than
+ * fullness class 0. The reason is that it's much better to have a smaller
+ * number of superblocks with higher average utilization than a larger
+ * number with lower utilization. When a superblock has only a few
+ * remaining allocations, we prefer to hold off allocating from it in the
+ * hopes that the remaining chunks will soon be freed, allowing us to
+ * deallocate the entire superblock.
*/
+ aidx = size_class * MSPAN_NUMBER_OF_FULLNESS_CLASSES + 1;
+ for (;;)
+ {
+ mspan *superblock;
+ void *result;
+
+ if (relptr_is_null(cxt->small_allocation[aidx]))
+ mspan_ensure_active_superblock(base, cxt, size_class);
+ superblock = relptr_access(base, cxt->small_allocation[aidx]);
+ Assert(superblock->span_type == size_class);
+ result = mspan_allocate_from_superblock(base, superblock);
+ if (result != NULL)
+ return result;
+ mspan_adjust_superblock_fullness(base, cxt, superblock, 1);
+ }
+}
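+
+/*
+ * Sketch only: how cxt->small_allocation[] is indexed.  Each size class owns
+ * MSPAN_NUMBER_OF_FULLNESS_CLASSES consecutive list heads, so the head for a
+ * given (size class, fullness class) pair lives at the index computed below.
+ * The allocator open-codes this arithmetic above and in
+ * mspan_adjust_superblock_fullness(); this helper is purely illustrative and
+ * is not called anywhere in the patch.
+ */
+static inline int
+mspan_small_allocation_index(uint16 size_class, int fullness_class)
+{
+ return size_class * MSPAN_NUMBER_OF_FULLNESS_CLASSES + fullness_class;
+}
+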
- /*
- * XXX If we couldn't find memory in an existing superblock, allocate
- * a new superblock for the appropriate size class.
- */
+/*
+ * Determine whether a superblock is filed under the appropriate fullness
+ * class, and if not, move it to the right place. The return value is the
+ * new fullness class for the superblock.
+ */
+static int
+mspan_adjust_superblock_fullness(char *base, mspan_context *cxt,
+ mspan *superblock, int current_fullness_class)
+{
+ uint16 total;
+ int fullness_class;
+
+ Assert(superblock->span_type < lengthof(mspan_size_classes));
+ total = MSPAN_SUPERBLOCK_SIZE / mspan_size_classes[superblock->span_type];
+ Assert(superblock->nused <= total);
+ if (superblock->nused == 0)
+ fullness_class = 0;
+ else
+ {
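+ /*
+ * Class N covers the values of nused for which
+ * nused * MSPAN_NUMBER_OF_FULLNESS_CLASSES falls in the range
+ * (N * total, (N + 1) * total]; subtracting one before dividing makes
+ * the upper bound inclusive, so a completely full superblock lands in
+ * the last class rather than one past it.
+ */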
+ fullness_class = ((superblock->nused *
+ MSPAN_NUMBER_OF_FULLNESS_CLASSES) - 1) / total;
+ Assert(fullness_class < MSPAN_NUMBER_OF_FULLNESS_CLASSES);
+ }
+
+ if (fullness_class != current_fullness_class)
+ {
+ int aidx;
+ mspan *head;
+
+ /* It's on the wrong list, so unlink it from where it is now... */
+ mspan_unlink_span(base, superblock);
+
+ /* ...and put it where it's supposed to be. */
+ aidx = superblock->span_type * MSPAN_NUMBER_OF_FULLNESS_CLASSES
+ + fullness_class;
+ head = relptr_access(base, cxt->small_allocation[aidx]);
+ if (head == NULL)
+ {
+ relptr_store(base, superblock->nextspan, superblock);
+ relptr_store(base, superblock->prevspan, superblock);
+ }
+ else
+ {
+ mspan *tail;
+
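+ /*
+ * Insert at the head of a non-empty circular list: the new superblock
+ * is linked in between the old tail (head->prevspan) and the old head,
+ * and then becomes the new head when the list pointer is updated below.
+ */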
+ tail = relptr_access(base, head->prevspan);
+ relptr_store(base, superblock->nextspan, head);
+ superblock->prevspan.relptr_off = head->prevspan.relptr_off;
+ relptr_store(base, head->prevspan, superblock);
+ relptr_store(base, tail->nextspan, superblock);
+ }
+ relptr_store(base, cxt->small_allocation[aidx], superblock);
+ }
+
+ return fullness_class;
}
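+
+/*
+ * Illustrative sketch only, guarded by a hypothetical symbol so that it is
+ * not compiled into the allocator: walk one circular fullness list and
+ * verify that it is well-formed.  We deliberately do not check that each
+ * superblock's actual fullness matches the list it is on, because the lists
+ * are maintained lazily (see the fullness class comments above).
+ */
+#ifdef MSPAN_CHECK_FULLNESS_LISTS
+static void
+mspan_check_fullness_list(char *base, mspan_context *cxt,
+ uint16 size_class, int fullness_class)
+{
+ int aidx = size_class * MSPAN_NUMBER_OF_FULLNESS_CLASSES + fullness_class;
+ mspan *head = relptr_access(base, cxt->small_allocation[aidx]);
+ mspan *cur = head;
+
+ if (head == NULL)
+ return;
+ do
+ {
+ mspan *next = relptr_access(base, cur->nextspan);
+
+ /* Every entry must belong to this size class... */
+ Assert(cur->span_type == size_class);
+ /* ...and adjacent prev/next links must agree. */
+ Assert(relptr_access(base, next->prevspan) == cur);
+ cur = next;
+ } while (cur != head);
+}
+#endif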
/*
int i;
int aidx;
- Assert(aidx < lengthof(mspan_size_classes));
aidx = span->span_type * MSPAN_NUMBER_OF_FULLNESS_CLASSES;
for (i = 0; i < MSPAN_NUMBER_OF_FULLNESS_CLASSES; ++i)
{