Rollup merge of #127845 - workingjubilee:actually-break-up-big-ass-stack-overflow-fn, r=joboet

unix: break `stack_overflow::install_main_guard` into smaller fn

This was one big deeply-indented function for no reason. This made it hard to reason about the boundaries of its safety. Or just, y'know, read. Simplify it by splitting it into platform-specific functions, while still requiring all of them to keep compiling on every unix-y OS (a desirable property, since all of these OSes use a similar API).

This is mostly a whitespace change, so I suggest reviewing it only after setting Files changed -> (the options gear) -> [x] Hide whitespace, as that will make it easier to see how the code was actually broken up, rather than reading raw line diffs.
This commit is contained in:
Trevor Gross 2024-07-17 19:53:28 -05:00 committed by GitHub
commit 3c4f820c5b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -44,6 +44,7 @@ mod imp {
use crate::ops::Range; use crate::ops::Range;
use crate::ptr; use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use crate::sync::OnceLock;
use crate::sys::pal::unix::os; use crate::sys::pal::unix::os;
use crate::thread; use crate::thread;
@ -306,9 +307,8 @@ mod imp {
ret ret
} }
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> { fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed); let stackptr = unsafe { get_stack_start()? };
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr(); let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might // Ensure stackaddr is page aligned! A parent process might
@ -325,9 +325,28 @@ mod imp {
}) })
} }
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard() -> Option<Range<usize>> { unsafe fn install_main_guard() -> Option<Range<usize>> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed); let page_size = PAGE_SIZE.load(Ordering::Relaxed);
unsafe {
// this way someone on any unix-y OS can check that all these compile
if cfg!(all(target_os = "linux", not(target_env = "musl"))) { if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
install_main_guard_linux(page_size)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
install_main_guard_linux_musl(page_size)
} else if cfg!(target_os = "freebsd") {
install_main_guard_freebsd(page_size)
} else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
install_main_guard_bsds(page_size)
} else {
install_main_guard_default(page_size)
}
}
}
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
// Linux doesn't allocate the whole stack right away, and // Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault // the kernel has its own stack-guard mechanism to fault
// when growing too close to an existing mapping. If we map // when growing too close to an existing mapping. If we map
@ -338,50 +357,56 @@ mod imp {
// Instead, we'll just note where we expect rlimit to start // Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and // faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work. // trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?; let stackptr = stack_start_aligned(page_size)?;
let stackaddr = stackptr.addr(); let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr) Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) { }
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
// For the main thread, the musl's pthread_attr_getstack // For the main thread, the musl's pthread_attr_getstack
// returns the current stack size, rather than maximum size // returns the current stack size, rather than maximum size
// it can eventually grow to. It cannot be used to determine // it can eventually grow to. It cannot be used to determine
// the position of kernel's stack guard. // the position of kernel's stack guard.
None None
} else if cfg!(target_os = "freebsd") { }
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
// FreeBSD's stack autogrows, and optionally includes a guard page // FreeBSD's stack autogrows, and optionally includes a guard page
// at the bottom. If we try to remap the bottom of the stack // at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use // ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page. // the builtin guard page.
let stackptr = get_stack_start_aligned()?; let stackptr = stack_start_aligned(page_size)?;
let guardaddr = stackptr.addr(); let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled // Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl. // by the security.bsd.stack_guard_page sysctl.
// By default it is 1, checking once is enough since it is // By default it is 1, checking once is enough since it is
// a boot time config value. // a boot time config value.
static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new(); static PAGES: OnceLock<usize> = OnceLock::new();
let pages = PAGES.get_or_init(|| { let pages = PAGES.get_or_init(|| {
use crate::sys::weak::dlsym; use crate::sys::weak::dlsym;
dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int); dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
let mut guard: usize = 0; let mut guard: usize = 0;
let mut size = crate::mem::size_of_val(&guard); let mut size = mem::size_of_val(&guard);
let oid = crate::ffi::CStr::from_bytes_with_nul( let oid = c"security.bsd.stack_guard_page";
b"security.bsd.stack_guard_page\0",
)
.unwrap();
match sysctlbyname.get() { match sysctlbyname.get() {
Some(fcn) => { Some(fcn) if unsafe {
if fcn(oid.as_ptr(), core::ptr::addr_of_mut!(guard) as *mut _, core::ptr::addr_of_mut!(size) as *mut _, crate::ptr::null_mut(), 0) == 0 { fcn(oid.as_ptr(),
guard ptr::addr_of_mut!(guard).cast(),
} else { ptr::addr_of_mut!(size),
1 ptr::null_mut(),
} 0) == 0
}, } => guard,
_ => 1, _ => 1,
} }
}); });
Some(guardaddr..guardaddr + pages * page_size) Some(guardaddr..guardaddr + pages * page_size)
} else if cfg!(any(target_os = "openbsd", target_os = "netbsd")) { }
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
// OpenBSD stack already includes a guard page, and stack is // OpenBSD stack already includes a guard page, and stack is
// immutable. // immutable.
// NetBSD stack includes the guard page. // NetBSD stack includes the guard page.
@ -389,10 +414,13 @@ mod imp {
// We'll just note where we expect rlimit to start // We'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and // faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work. // trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?; let stackptr = stack_start_aligned(page_size)?;
let stackaddr = stackptr.addr(); let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr) Some(stackaddr - page_size..stackaddr)
} else { }
#[forbid(unsafe_op_in_unsafe_fn)]
unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
// Reallocate the last page of the stack. // Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on // This ensures SIGBUS will be raised on
// stack overflow. // stack overflow.
@ -401,20 +429,22 @@ mod imp {
// than the initial mmap() used, so we mmap() here with // than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to // read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313. // no permissions at all. See issue #50313.
let stackptr = get_stack_start_aligned()?; let stackptr = stack_start_aligned(page_size)?;
let result = mmap64( let result = unsafe {
mmap64(
stackptr, stackptr,
page_size, page_size,
PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, -1,
0, 0,
); )
};
if result != stackptr || result == MAP_FAILED { if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error()); panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
} }
let result = mprotect(stackptr, page_size, PROT_NONE); let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
if result != 0 { if result != 0 {
panic!("failed to protect the guard page: {}", io::Error::last_os_error()); panic!("failed to protect the guard page: {}", io::Error::last_os_error());
} }
@ -423,7 +453,6 @@ mod imp {
Some(guardaddr..guardaddr + page_size) Some(guardaddr..guardaddr + page_size)
} }
}
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))] #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
unsafe fn current_guard() -> Option<Range<usize>> { unsafe fn current_guard() -> Option<Range<usize>> {