*/
struct mspan_context
{
+ relptr(mspan_manager) manager; /* owning manager, used at teardown */
relptr(mspan) large_allocation;
relptr(mspan) small_allocation[MSPAN_SMALL_ALLOCATION_LISTS];
};
static void mspan_destroy_span(char *base, mspan *span);
static mspan *mspan_find_free_span(char *base, mspan_manager *mgr,
Size minpages, Size maxpages);
-static void mspan_recycle_span(char *base, mspan_manager *mgr,
- mspan *span);
+static void mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span);
+static void mspan_release_span(char *base, mspan_manager *mgr, mspan *span);
static void mspan_unlink_span(char *base, mspan *span);
static void mspan_update_page_map(char *base, mspan_manager *mgr,
Size first_page, Size npages, Size value);
return cxt;
}
+/*
+ * Destroy an allocation context within an address space.
+ *
+ * This releases all storage associated with the context.
+ */
+void
+mspan_context_destroy(dsm_segment *seg, mspan_context *cxt)
+{
+ char *base = (seg != NULL ? dsm_segment_address(seg) : NULL);
+ mspan_manager *mgr = relptr_access(base, cxt->manager);
+ int i;
+
+ /* Release large allocations one at a time. */
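+ /*
+ * mspan_release_span() is expected to unlink each span from this list,
+ * advancing cxt->large_allocation; otherwise this loop would never
+ * terminate.  (The same applies to the per-list loops below.)
+ */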
+ while (!relptr_is_null(cxt->large_allocation))
+ {
+ mspan *span = relptr_access(base, cxt->large_allocation);
+ mspan_release_span(base, mgr, span);
+ }
+
+ /* Release small allocations one superblock at a time. */
+ for (i = 0; i < MSPAN_SMALL_ALLOCATION_LISTS; ++i)
+ {
+ while (!relptr_is_null(cxt->small_allocation[i]))
+ {
+ mspan *span = relptr_access(base, cxt->small_allocation[i]);
+ mspan_release_span(base, mgr, span);
+ }
+ }
+
+ /*
+ * Put this context object back on the manager's free list.  The first
+ * Size of the defunct context is reused to hold the offset of the
+ * previous free-list head.
+ */
+ *(Size *) cxt = mgr->freecontext.relptr_off;
+ relptr_store(base, mgr->freecontext, cxt);
+}
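+
+/*
+ * Usage sketch (hypothetical caller; the creation function's signature
+ * is assumed from context):
+ *
+ *	cxt = mspan_context_create(seg, mgr);
+ *	... allocate objects within cxt ...
+ *	mspan_context_destroy(seg, cxt);
+ */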
+
/*
* Allocate new space for a new context descriptor.
*
* appropriate.
*/
mspan_update_page_map(base, mgr, pageno, 1, 0);
+ mspan_unlink_span(base, span); /* XXX Head of circular list. */
if (span->npages == 1)
mspan_destroy_span(base, span);
else
{
- mspan_unlink_span(base, span);
++span->first_page;
--span->npages;
mspan_recycle_span(base, mgr, span);
relptr_store(base, mgr->freelist[fidx], span);
}
+/*
+ * Release the memory consumed by a span, consolidating it with adjacent free
+ * spans if possible.
+ */
+static void
+mspan_release_span(char *base, mspan_manager *mgr, mspan *span)
+{
+ mspan *preceding_span = NULL;
+ mspan *following_span = NULL;
+
+ /*
+ * Find the spans that precede and follow the span to be released within
+ * the address space, if they are free. In the page map, 0 means no entry
+ * and any odd value means that the span is allocated, so we ignore those
+ * values.
+ */
+ if (span->first_page > 0)
+ {
+ relptr(mspan) p;
+
+ p.relptr_off = aspace_map_get(&mgr->page_map,
+ span->first_page - 1, base);
+ if (p.relptr_off != 0 && (p.relptr_off & 1) == 0)
+ preceding_span = relptr_access(base, p);
+ }
+ if (mgr->npages == 0 || span->first_page + span->npages < mgr->boundary)
+ {
+ relptr(mspan) f;
+
+ f.relptr_off = aspace_map_get(&mgr->page_map,
+ span->first_page + span->npages, base);
+ if (f.relptr_off != 0 && (f.relptr_off & 1) == 0)
+ following_span = relptr_access(base, f);
+ }
+
+ /*
+ * XXX. Remove this span from the list which contains it.
+ *
+ * If we're blowing away the entire context, this span will be on one
+ * of the context's lists of allocated objects ... and if we're freeing
+ * it because it's empty, it will likewise be on such a list ... but if
+ * it's a span of spans, then perhaps not.
+ */
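+
+ /*
+ * (One possible shape for this, sketched on the assumption that the
+ * list head is reachable from here: call mspan_unlink_span(base, span)
+ * and, when span is the head of its circular list, repoint the head at
+ * span's successor, or set it to null if span was the only element.)
+ */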
+
+ /*
+ * XXX. Consolidate this span with the following span, if it's free.
+ */
+
+ /*
+ * XXX. Consolidate this span with the previous span, if it's free.
+ */
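+
+ /*
+ * (A sketch of the consolidation, assuming the adjacent free spans
+ * have already been unlinked from their free lists:
+ *
+ *	if (following_span != NULL)
+ *	{
+ *		span->npages += following_span->npages;
+ *		mspan_destroy_span(base, following_span);
+ *	}
+ *	if (preceding_span != NULL)
+ *	{
+ *		preceding_span->npages += span->npages;
+ *		mspan_destroy_span(base, span);
+ *		span = preceding_span;
+ *	}
+ *
+ * The page-map updates below then cover the enlarged span.)
+ */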
+
+ /*
+ * Make new page map entries for the span.
+ *
+ * Since allocated spans have page map entries with the least significant
+ * bit set, we need to make new entries regardless of whether we succeeded
+ * in consolidating with adjacent spans. If we did consolidate, we need
+ * new entries for that reason as well: the first and last pages of the
+ * new and larger span must point to the correct object. This coding may
+ * leave behind stale mappings between the first and last pages of the
+ * object, but it doesn't matter. For a free span, only the first and
+ * last pages will ever be looked up in the page map; we needn't spend
+ * time fixing whatever junk entries may exist in the middle.
+ */
+ mspan_update_page_map(base, mgr, span->first_page, 1,
+ ((char *) span) - base);
+ if (span->npages > 1)
+ mspan_update_page_map(base, mgr, span->first_page + span->npages - 1,
+ 1, ((char *) span) - base);
+ mspan_recycle_span(base, mgr, span);
+}
+
/*
* Update the page map.
*/