Commit 4b8f88b

Merge pull request #4310 from RalfJung/addr-space-conservation
alloc_addresses: when we are running out of addresses, start reusing more aggressively
2 parents 84b3142 + 5c7f1d7 commit 4b8f88b

3 files changed, +17 -8 lines changed

src/tools/miri/bench-cargo-miri/big-allocs/src/main.rs (+1 -4)

@@ -7,10 +7,7 @@ fn main() {
     // We can't use too big of an allocation or this code will encounter an allocation failure in
     // CI. Since the allocation can't be huge, we need to do a few iterations so that the effect
     // we're trying to measure is clearly visible above the interpreter's startup time.
-    // FIXME (https://github.com/rust-lang/miri/issues/4253): On 32bit targets, we can run out of
-    // usable addresses if we don't reuse, leading to random test failures.
-    let count = if cfg!(target_pointer_width = "32") { 8 } else { 12 };
-    for _ in 0..count {
+    for _ in 0..20 {
         drop(Vec::<u8>::with_capacity(512 * 1024 * 1024));
     }
 }
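
Why the FIXME and the per-target iteration count can go away: the loop now requests 20 × 512 MiB = 10 GiB of fresh allocations, which can never fit in the 4 GiB of a 32-bit address space, so the benchmark inherently relies on the interpreter reusing addresses rather than capping the iteration count. A back-of-the-envelope check of that arithmetic (a standalone sketch, not part of the benchmark):

fn main() {
    // Each iteration asks for a brand-new 512 MiB allocation.
    let per_iter: u64 = 512 * 1024 * 1024;
    let total = 20 * per_iter; // 10 GiB requested over the whole run
    let addr_space_32bit: u64 = 1 << 32; // 4 GiB of 32-bit address space
    assert!(total > addr_space_32bit);
    println!("{} GiB requested vs {} GiB available on 32-bit",
             total >> 30, addr_space_32bit >> 30);
    // Without address reuse a 32-bit target would run out of addresses;
    // with the aggressive-reuse fallback from this PR, it keeps going.
}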

src/tools/miri/src/alloc_addresses/mod.rs (+5)

@@ -205,6 +205,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         if global_state.next_base_addr > this.target_usize_max() {
             throw_exhaust!(AddressSpaceFull);
         }
+        // If we filled up more than half the address space, start aggressively reusing
+        // addresses to avoid running out.
+        if global_state.next_base_addr > u64::try_from(this.target_isize_max()).unwrap() {
+            global_state.reuse.address_space_shortage();
+        }
 
         interp_ok(base_addr)
     }

src/tools/miri/src/alloc_addresses/reuse_pool.rs (+11 -4)

@@ -20,7 +20,7 @@ pub struct ReusePool {
     /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
-    /// less than 64 different possible value, that bounds the overall size of the pool.
+    /// less than 64 different possible values, that bounds the overall size of the pool.
     ///
     /// We also store the ID and the data-race clock of the thread that donated this pool element,
     /// to ensure synchronization with the thread that picks up this address.
@@ -36,6 +36,15 @@ impl ReusePool {
         }
     }
 
+    /// Call this when we are using up a lot of the address space: if memory reuse is enabled at all,
+    /// this will bump the intra-thread reuse rate to 100% so that we can keep running this program as
+    /// long as possible.
+    pub fn address_space_shortage(&mut self) {
+        if self.address_reuse_rate > 0.0 {
+            self.address_reuse_rate = 1.0;
+        }
+    }
+
     fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
@@ -55,9 +64,7 @@ impl ReusePool {
         clock: impl FnOnce() -> VClock,
     ) {
         // Let's see if we even want to remember this address.
-        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
-        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
-        // undesired synchronization.
+        // We don't remember stack addresses since there's so many of them (so the perf impact is big).
         if kind == MemoryKind::Stack || !rng.random_bool(self.address_reuse_rate) {
             return;
         }
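
To isolate what address_space_shortage does: the pool remembers a freed address with probability address_reuse_rate, and the shortage hook raises that probability to 1.0 unless reuse was disabled outright (a rate of 0.0 is treated as an explicit opt-out and left alone). A minimal sketch of just that behavior, assuming only the one field that matters here (ReusePoolSketch is a hypothetical stand-in, not Miri's type):

// Hypothetical stand-in for Miri's ReusePool, reduced to the rate field.
struct ReusePoolSketch {
    address_reuse_rate: f64, // probability of remembering a freed address
}

impl ReusePoolSketch {
    fn address_space_shortage(&mut self) {
        // Only escalate if reuse is enabled at all: a rate of 0.0 means
        // the user opted out of reuse, and the shortage hook respects that.
        if self.address_reuse_rate > 0.0 {
            self.address_reuse_rate = 1.0;
        }
    }
}

fn main() {
    let mut pool = ReusePoolSketch { address_reuse_rate: 0.5 };
    pool.address_space_shortage();
    assert_eq!(pool.address_reuse_rate, 1.0); // reuse is now unconditional

    let mut disabled = ReusePoolSketch { address_reuse_rate: 0.0 };
    disabled.address_space_shortage();
    assert_eq!(disabled.address_reuse_rate, 0.0); // opt-out is preserved
}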
