uint16 size_class);
static mspan *mspan_find_free_span(char *base, mspan_manager *mgr,
Size minpages, Size maxpages);
+static void mspan_initialize_span(char *base, mspan_manager *mgr,
+ mspan_context *cxt, mspan *span, uint16 span_type);
static void mspan_link_span_to_context(char *base, mspan_context *cxt,
mspan *span);
static void mspan_link_span_to_manager(char *base, mspan_manager *mgr,
mspan_allocate_span(char *base, mspan_manager *mgr, mspan_context *cxt,
uint16 span_type, Size pages)
{
- mspan *span;
+ mspan *span;
+ Size first_page;
/*
* Search for an existing span. If we're allocating space for a large
{
/* Remove the span from the free list. */
mspan_unlink_span(base, span);
-
- /* Initialize the span for use. */
- span->span_type = span_type;
- span->ninitialized = 0;
- span->nused = 0;
- span->firstfree = MSPAN_FIRSTFREE_NONE;
-
- /* Put the span on the list that ought to contain it. */
- if (span_type == MSPAN_TYPE_SPAN_OF_SPANS)
- {
- Assert(cxt == NULL);
- mspan_link_span_to_manager(base, mgr, span);
- }
- else
- {
- Assert(cxt != NULL);
- mspan_link_span_to_context(base, cxt, span);
- }
+ mspan_initialize_span(base, mgr, cxt, span, span_type);
/* XXX. Update page map entries. */
return NULL;
}
+ /* Allocate storage for the new span. */
+ if (base != NULL)
+ {
+ /* In the dynamic shared memory case, allocate from the boundary. */
+ /*
+ * NOTE(review): should this be '>' rather than '>='?  As written, a
+ * request for which boundary + pages == npages -- one that would
+ * exactly exhaust the remaining pages -- is rejected.  Confirm whether
+ * the last page is intentionally reserved.
+ */
+ if (mgr->boundary + pages >= mgr->npages)
+ {
+ /* Not enough pages remaining. */
+ mspan_destroy_span(base, span);
+ return NULL;
+ }
+ first_page = mgr->boundary;
+ mgr->boundary += pages;
+ }
+ else
+ {
+ /*
+ * XXX. Allocate more core via malloc. We need a system here for
+ * this. Obviously we shouldn't just allocate the smallest amount
+ * needed for this span unless that's already pretty big. Instead,
+ * we should allocate enough for this span and then throw the remainder
+ * in a bucket for later use. But the mechanism for that is not
+ * designed yet.
+ *
+ * XXX. How exactly are we going to give the segments we malloc
+ * back to the OS? How are we even going to know where they are?
+ * We can add them to the freelists as a big old span, but that's
+ * not going to help much in terms of identifying them later.
+ */
+ first_page = 0; /* XXX. Bogus. */
+ }
+
/*
- * XXX. We need more core. Allocate either from the boundary or
- * via malloc.
- */
- /*
- * XXX. How exactly are we going to give the segments we malloc
- * back to the OS? How are we even going to know where they are?
- * We can add them to the freelists as a big old span, but that's
- * not going to help much in terms of identifying them later.
+ * If this is a span-of-spans, allocate a descriptor for the new span
+ * out of the span itself. Otherwise, a descriptor must already have
+ * been obtained before reaching this point.  NOTE(review): that
+ * descriptor allocation is not visible in this hunk -- confirm it
+ * happens earlier, since the Assert below requires span != NULL.
*/
+ if (span_type == MSPAN_TYPE_SPAN_OF_SPANS)
+ {
+ Assert(span == NULL);
+ span = (mspan *) (base + first_page * MSPAN_PAGE_SIZE);
+ }
+ Assert(span != NULL);
- return NULL;
+ /* Initialize the new span. */
+ span->first_page = first_page;
+ span->npages = pages;
+ mspan_initialize_span(base, mgr, cxt, span, span_type);
+
+ return span;
}
/*
return NULL;
}
+/*
+ * Initialize a span descriptor and link it onto the appropriate list.
+ *
+ * Sets the span's type and marks its object free-list empty, then routes
+ * the span to its owner: a span-of-spans is tracked by the manager (cxt
+ * must be NULL) and, because it stores its own descriptor within itself,
+ * begins life with one allocation already initialized and in use; every
+ * other span type is tracked by the given context (cxt must be non-NULL)
+ * and starts out empty.
+ */
+static void
+mspan_initialize_span(char *base, mspan_manager *mgr, mspan_context *cxt,
+ mspan *span, uint16 span_type)
+{
+ /* The basics. */
+ span->span_type = span_type;
+ span->firstfree = MSPAN_FIRSTFREE_NONE;
+
+ /*
+ * Normally, the span starts out empty, but a span-of-spans contains
+ * its own descriptor, so it starts out containing one allocation.
+ * A span-of-spans is different in another way as well: it's managed
+ * by the manager, not the context.
+ */
+ if (span_type == MSPAN_TYPE_SPAN_OF_SPANS)
+ {
+ Assert(cxt == NULL);
+ span->ninitialized = 1;
+ span->nused = 1;
+ mspan_link_span_to_manager(base, mgr, span);
+ }
+ else
+ {
+ Assert(cxt != NULL);
+ span->ninitialized = 0;
+ span->nused = 0;
+ mspan_link_span_to_context(base, cxt, span);
+ }
+}
+
/*
* Add a span to a linked list of spans.
*
* specified by ptr.
*/
static void
-mspan_link_span_internal(char *base, Size *ptr, mspan *span)
+mspan_link_span_internal(char *base, void *parent, Size *ptr, mspan *span)
{
relptr(mspan) rptr;
Assert(relptr_is_null(span->prevspan));
#endif
+ relptr_store(base, span->parent, parent);
if (*ptr == 0)
{
relptr_store(base, span->nextspan, span);
ptr = &cxt->small_allocation[aidx].relptr_off;
}
- mspan_link_span_internal(base, ptr, span);
+ mspan_link_span_internal(base, cxt, ptr, span);
}
/*
ptr = &mgr->freelist[fidx].relptr_off;
}
- mspan_link_span_internal(base, ptr, span);
+ mspan_link_span_internal(base, mgr, ptr, span);
}
/*