|
| 1 | +use libc::c_void; |
| 2 | +use std::io; |
| 3 | +use std::ptr; |
| 4 | +use windows_sys::Win32::Foundation::BOOL; |
| 5 | +use windows_sys::Win32::System::Memory::VirtualQuery; |
| 6 | +use windows_sys::Win32::System::Threading::{ |
| 7 | + ConvertFiberToThread, ConvertThreadToFiber, CreateFiber, DeleteFiber, IsThreadAFiber, |
| 8 | + SetThreadStackGuarantee, SwitchToFiber, |
| 9 | +}; |
| 10 | + |
// Make sure the libstacker.a (implemented in C) is linked.
// See https://github.com/rust-lang/rust/issues/65610
#[link(name = "stacker")]
extern "C" {
    // C shim returning the currently executing fiber's data pointer.
    // Needed because Win32's `GetCurrentFiber()` is a header-only inline
    // function/macro, so there is no importable symbol to bind to from Rust.
    fn __stacker_get_current_fiber() -> *mut c_void;
}
| 17 | + |
// State shared (via raw pointer) between `_grow`, which keeps it alive on the
// parent stack, and `fiber_proc`, which runs on the new fiber's stack.
struct FiberInfo<F> {
    // The callback to execute on the fiber. Wrapped in `MaybeUninit` because
    // `fiber_proc` moves it out with a raw `read()`; `MaybeUninit` ensures it
    // is not dropped a second time when this struct is dropped in `_grow`.
    callback: std::mem::MaybeUninit<F>,
    // Panic payload captured while running `callback` on the fiber, if any.
    // `_grow` re-throws it once control is back on the original stack, so
    // unwinding never crosses the fiber switch itself.
    panic: Option<Box<dyn std::any::Any + Send + 'static>>,
    // Handle of the fiber to switch back to when the callback finishes.
    parent_fiber: *mut c_void,
}
| 23 | + |
// Entry point executed on the newly created fiber's stack.
//
// `data` is a type-erased pointer to the `FiberInfo<F>` that `_grow` keeps
// alive on the parent stack for the entire duration of this call.
unsafe extern "system" fn fiber_proc<F: FnOnce()>(data: *mut c_void) {
    // This function is the entry point to our inner fiber, and as argument we get an
    // instance of `FiberInfo`. We will set-up the "runtime" for the callback and execute
    // it.
    let data = &mut *(data as *mut FiberInfo<F>);
    // Swap in this fiber's stack limit so recursion checks made inside the
    // callback measure against the new stack, not the parent's; the old limit
    // is restored below before switching back.
    let old_stack_limit = crate::get_stack_limit();
    crate::set_stack_limit(guess_os_stack_limit());
    // Move the callback out of the `MaybeUninit` (initialized exactly once by
    // `_grow`, consumed exactly once here) and run it, capturing any panic
    // payload so `_grow` can resume the unwind on the parent stack.
    let callback = data.callback.as_ptr();
    data.panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback.read())).err();

    // Restore to the previous Fiber
    crate::set_stack_limit(old_stack_limit);
    // Hand control (and this fiber's stack) back to `_grow`; execution never
    // proceeds past this call because `_grow` deletes this fiber afterwards.
    SwitchToFiber(data.parent_fiber);
}
| 38 | + |
| 39 | +pub fn _grow(stack_size: usize, callback: &mut dyn FnMut()) { |
| 40 | + // Fibers (or stackful coroutines) is the only official way to create new stacks on the |
| 41 | + // same thread on Windows. So in order to extend the stack we create fiber and switch |
| 42 | + // to it so we can use it's stack. After running `callback` within our fiber, we switch |
| 43 | + // back to the current stack and destroy the fiber and its associated stack. |
| 44 | + unsafe { |
| 45 | + let was_fiber = IsThreadAFiber() == 1 as BOOL; |
| 46 | + let mut data = FiberInfo { |
| 47 | + callback: std::mem::MaybeUninit::new(callback), |
| 48 | + panic: None, |
| 49 | + parent_fiber: { |
| 50 | + if was_fiber { |
| 51 | + // Get a handle to the current fiber. We need to use a C implementation |
| 52 | + // for this as GetCurrentFiber is an header only function. |
| 53 | + __stacker_get_current_fiber() |
| 54 | + } else { |
| 55 | + // Convert the current thread to a fiber, so we are able to switch back |
| 56 | + // to the current stack. Threads coverted to fibers still act like |
| 57 | + // regular threads, but they have associated fiber data. We later |
| 58 | + // convert it back to a regular thread and free the fiber data. |
| 59 | + ConvertThreadToFiber(ptr::null_mut()) |
| 60 | + } |
| 61 | + }, |
| 62 | + }; |
| 63 | + |
| 64 | + if data.parent_fiber.is_null() { |
| 65 | + panic!( |
| 66 | + "unable to convert thread to fiber: {}", |
| 67 | + io::Error::last_os_error() |
| 68 | + ); |
| 69 | + } |
| 70 | + |
| 71 | + let fiber = CreateFiber( |
| 72 | + stack_size as usize, |
| 73 | + Some(fiber_proc::<&mut dyn FnMut()>), |
| 74 | + &mut data as *mut FiberInfo<&mut dyn FnMut()> as *mut _, |
| 75 | + ); |
| 76 | + if fiber.is_null() { |
| 77 | + panic!("unable to allocate fiber: {}", io::Error::last_os_error()); |
| 78 | + } |
| 79 | + |
| 80 | + // Switch to the fiber we created. This changes stacks and starts executing |
| 81 | + // fiber_proc on it. fiber_proc will run `callback` and then switch back to run the |
| 82 | + // next statement. |
| 83 | + SwitchToFiber(fiber); |
| 84 | + DeleteFiber(fiber); |
| 85 | + |
| 86 | + // Clean-up. |
| 87 | + if !was_fiber && ConvertFiberToThread() == 0 { |
| 88 | + // FIXME: Perhaps should not panic here? |
| 89 | + panic!( |
| 90 | + "unable to convert back to thread: {}", |
| 91 | + io::Error::last_os_error() |
| 92 | + ); |
| 93 | + } |
| 94 | + |
| 95 | + if let Some(p) = data.panic { |
| 96 | + std::panic::resume_unwind(p); |
| 97 | + } |
| 98 | + } |
| 99 | +} |
| 100 | + |
| 101 | +#[inline(always)] |
| 102 | +fn get_thread_stack_guarantee() -> Option<usize> { |
| 103 | + let min_guarantee = if cfg!(target_pointer_width = "32") { |
| 104 | + 0x1000 |
| 105 | + } else { |
| 106 | + 0x2000 |
| 107 | + }; |
| 108 | + let mut stack_guarantee = 0; |
| 109 | + unsafe { |
| 110 | + // Read the current thread stack guarantee |
| 111 | + // This is the stack reserved for stack overflow |
| 112 | + // exception handling. |
| 113 | + // This doesn't return the true value so we need |
| 114 | + // some further logic to calculate the real stack |
| 115 | + // guarantee. This logic is what is used on x86-32 and |
| 116 | + // x86-64 Windows 10. Other versions and platforms may differ |
| 117 | + let ret = SetThreadStackGuarantee(&mut stack_guarantee); |
| 118 | + if ret == 0 { |
| 119 | + return None; |
| 120 | + } |
| 121 | + }; |
| 122 | + Some(std::cmp::max(stack_guarantee, min_guarantee) as usize + 0x1000) |
| 123 | +} |
| 124 | + |
/// Guesses the lowest usable address of the current stack by querying the
/// virtual-memory allocation containing the current stack pointer.
///
/// # Safety
///
/// Must run on a live thread/fiber stack so the current stack pointer lies
/// inside an allocation that `VirtualQuery` can inspect.
#[inline(always)]
pub unsafe fn guess_os_stack_limit() -> Option<usize> {
    // Query the allocation which contains our stack pointer in order
    // to discover the size of the stack
    //
    // FIXME: we could read stack base from the TIB, specifically the 3rd element of it.
    type QueryT = windows_sys::Win32::System::Memory::MEMORY_BASIC_INFORMATION;
    let mut mi = std::mem::MaybeUninit::<QueryT>::uninit();
    let res = VirtualQuery(
        psm::stack_pointer() as *const _,
        mi.as_mut_ptr(),
        std::mem::size_of::<QueryT>() as usize,
    );
    // VirtualQuery returns the number of bytes written to the buffer; 0 means failure.
    if res == 0 {
        return None;
    }
    // AllocationBase is the base of the stack reservation; leave room above it
    // for the thread stack guarantee plus one extra page.
    Some(mi.assume_init().AllocationBase as usize + get_thread_stack_guarantee()? + 0x1000)
}
0 commit comments