@@ -20,7 +20,7 @@ pub struct ReusePool {
     /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
-    /// less than 64 different possible value, that bounds the overall size of the pool.
+    /// less than 64 different possible values, that bounds the overall size of the pool.
     ///
     /// We also store the ID and the data-race clock of the thread that donated this pool element,
     /// to ensure synchronization with the thread that picks up this address.
@@ -36,6 +36,15 @@ impl ReusePool {
         }
     }
 
+    /// Call this when we are using up a lot of the address space: if memory reuse is enabled at all,
+    /// this will bump the intra-thread reuse rate to 100% so that we can keep running this program as
+    /// long as possible.
+    pub fn address_space_shortage(&mut self) {
+        if self.address_reuse_rate > 0.0 {
+            self.address_reuse_rate = 1.0;
+        }
+    }
+
     fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
@@ -55,9 +64,7 @@ impl ReusePool {
         clock: impl FnOnce() -> VClock,
     ) {
         // Let's see if we even want to remember this address.
-        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
-        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
-        // undesired synchronization.
+        // We don't remember stack addresses since there's so many of them (so the perf impact is big).
         if kind == MemoryKind::Stack || !rng.random_bool(self.address_reuse_rate) {
             return;
         }
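
For context, here is a minimal self-contained sketch of the pattern the new method enables. It is not the actual Miri call site: the ReusePoolModel and Allocator types, the maybe_handle_shortage helper, and the seven-eighths threshold below are all made up for illustration. It only shows how a caller that notices the address space filling up might switch the pool into full-reuse mode, and that a configured reuse rate of 0.0 (reuse disabled) stays disabled.

// Hypothetical, simplified model of the behavior added in the diff above.

struct ReusePoolModel {
    // Probability of reusing a remembered address; 0.0 means reuse is disabled.
    address_reuse_rate: f64,
}

impl ReusePoolModel {
    // Mirrors the new method: only bump the rate if reuse is enabled at all,
    // so an explicit "no reuse" configuration is still respected.
    fn address_space_shortage(&mut self) {
        if self.address_reuse_rate > 0.0 {
            self.address_reuse_rate = 1.0;
        }
    }
}

struct Allocator {
    pool: ReusePoolModel,
    next_base_addr: u64,
    max_addr: u64, // hypothetical upper end of the usable address space
}

impl Allocator {
    fn maybe_handle_shortage(&mut self) {
        // Hypothetical threshold: once most of the address space is used,
        // ask the pool to reuse addresses as aggressively as possible.
        if self.next_base_addr > self.max_addr / 8 * 7 {
            self.pool.address_space_shortage();
        }
    }
}

fn main() {
    let mut alloc = Allocator {
        pool: ReusePoolModel { address_reuse_rate: 0.5 },
        next_base_addr: 0xF000_0000_0000,
        max_addr: 0x1_0000_0000_0000,
    };
    alloc.maybe_handle_shortage();
    assert_eq!(alloc.pool.address_reuse_rate, 1.0);
}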