Make some linux/unix APIs better conform to strict provenance.

This largely makes the stdlib conform to strict provenance on Ubuntu.
Some hairier things have been left alone for now.
Authored by Alexis Beingessner on 2022-03-22 21:29:38 -04:00; committed by Aria Beingessner
parent 68643603ad
commit 09395f626b
5 changed files with 39 additions and 36 deletions
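For orientation (this sketch is not part of the commit): the recurring rewrite is to stop round-tripping pointers through `usize`. The address is still manipulated as an integer, but the pointer is rebuilt from the original one with `with_addr`/`map_addr`, so it keeps its provenance. A minimal illustration, assuming the strict-provenance methods `addr`, `with_addr`, and `map_addr` (gated behind `feature(strict_provenance)` when this commit landed, since stabilized):

    // Hypothetical helpers, not taken from the commit: round a pointer down to
    // an alignment boundary. `align` must be a nonzero power of two.
    fn align_down_old(p: *mut u8, align: usize) -> *mut u8 {
        // Old style: the usize round-trip discards provenance.
        ((p as usize) & !(align - 1)) as *mut u8
    }

    fn align_down_new(p: *mut u8, align: usize) -> *mut u8 {
        // Strict-provenance style: only the address changes; the result keeps
        // the provenance of `p`.
        p.map_addr(|addr| addr & !(align - 1))
    }

    fn main() {
        let mut buf = [0u8; 64];
        let p = buf.as_mut_ptr().wrapping_add(13);
        assert_eq!(align_down_old(p, 8).addr(), align_down_new(p, 8).addr());
    }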

View File

@@ -493,7 +493,7 @@ impl RawFrame {
         match self {
             RawFrame::Actual(frame) => frame.ip(),
             #[cfg(test)]
-            RawFrame::Fake => ptr::invalid_mut(1),
+            RawFrame::Fake => crate::ptr::invalid_mut(1),
         }
     }
 }
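An aside on `ptr::invalid_mut(1)` above: the fake frame only needs a recognizable non-null address, and under strict provenance that is spelled as an address-only pointer rather than `1 as *mut c_void`. A hedged, standalone sketch (not from the commit); on current stable the same API is spelled `ptr::without_provenance_mut`, while the commit uses the nightly name `ptr::invalid_mut`:

    use std::ffi::c_void;
    use std::ptr;

    // Hypothetical stand-in for RawFrame::Fake's instruction pointer: an
    // address-only pointer that may be compared and printed, never dereferenced.
    fn fake_ip() -> *mut c_void {
        ptr::without_provenance_mut(1)
    }

    fn main() {
        let ip = fake_ip();
        assert!(!ip.is_null());
        assert_eq!(ip.addr(), 1);
    }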

View File

@@ -17,8 +17,8 @@ mod libc {
 fn sun_path_offset(addr: &libc::sockaddr_un) -> usize {
     // Work with an actual instance of the type since using a null pointer is UB
-    let base = addr as *const _ as usize;
-    let path = &addr.sun_path as *const _ as usize;
+    let base = (addr as *const libc::sockaddr_un).addr();
+    let path = (&addr.sun_path as *const i8).addr();
     path - base
 }
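For comparison, the same offset-of computation on a hypothetical struct (a sketch, not part of the commit). Subtracting `addr()` values is fine under strict provenance because the result is only ever used as an integer; no pointer is rebuilt from it:

    #[repr(C)]
    struct Example {
        tag: u32,
        payload: [u8; 16],
    }

    // Mirrors sun_path_offset: work with a real instance so both pointers
    // have valid provenance into the same object.
    fn payload_offset(x: &Example) -> usize {
        let base = (x as *const Example).addr();
        let field = (&x.payload as *const [u8; 16]).addr();
        field - base
    }

    fn main() {
        let x = Example { tag: 0, payload: [0; 16] };
        assert_eq!(payload_offset(&x), 4); // payload follows the 4-byte tag in repr(C)
    }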

View File

@@ -9,7 +9,7 @@ pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
             haystack.len(),
         )
     };
-    if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
+    if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
 }
 
 pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
@@ -26,7 +26,7 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
                 haystack.len(),
             )
         };
-        if p.is_null() { None } else { Some(p as usize - (haystack.as_ptr() as usize)) }
+        if p.is_null() { None } else { Some(p.addr() - haystack.as_ptr().addr()) }
     }
 
     #[cfg(not(target_os = "linux"))]
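The same recipe in isolation (a hypothetical helper, not from the commit): the pointer returned by the C routine is turned back into an index purely through address arithmetic. `unsafe { p.offset_from(haystack.as_ptr()) }` would also be correct, since both pointers are into the same allocation, but the `addr()` subtraction needs no unsafe block:

    fn index_of(haystack: &[u8], p: *const u8) -> Option<usize> {
        if p.is_null() {
            None
        } else {
            // Address arithmetic only; no pointer is materialized from the
            // result, so this stays within strict provenance.
            Some(p.addr() - haystack.as_ptr().addr())
        }
    }

    fn main() {
        let data = b"hello";
        let p = &data[3] as *const u8;
        assert_eq!(index_of(data, p), Some(3));
    }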

View File

@@ -505,9 +505,8 @@ pub mod guard {
     #[cfg(target_os = "macos")]
     unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
         let th = libc::pthread_self();
-        let stackaddr =
-            libc::pthread_get_stackaddr_np(th) as usize - libc::pthread_get_stacksize_np(th);
-        Some(stackaddr as *mut libc::c_void)
+        let stackptr = libc::pthread_get_stackaddr_np(th);
+        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
     }
 
     #[cfg(target_os = "openbsd")]
@@ -515,14 +514,15 @@ pub mod guard {
         let mut current_stack: libc::stack_t = crate::mem::zeroed();
         assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
 
+        let stack_ptr = current_stack.ss_sp;
         let stackaddr = if libc::pthread_main_np() == 1 {
             // main thread
-            current_stack.ss_sp as usize - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
+            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
         } else {
             // new thread
-            current_stack.ss_sp as usize - current_stack.ss_size
+            stack_ptr.addr() - current_stack.ss_size
         };
-        Some(stackaddr as *mut libc::c_void)
+        Some(stack_ptr.with_addr(stackaddr))
     }
 
     #[cfg(any(
@@ -557,7 +557,8 @@ pub mod guard {
     unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
         let page_size = PAGE_SIZE.load(Ordering::Relaxed);
         assert!(page_size != 0);
-        let stackaddr = get_stack_start()?;
+        let stackptr = get_stack_start()?;
+        let stackaddr = stackptr.addr();
 
         // Ensure stackaddr is page aligned! A parent process might
         // have reset RLIMIT_STACK to be non-page aligned. The
@@ -565,11 +566,11 @@ pub mod guard {
         // stackaddr < stackaddr + stacksize, so if stackaddr is not
         // page-aligned, calculate the fix such that stackaddr <
         // new_page_aligned_stackaddr < stackaddr + stacksize
-        let remainder = (stackaddr as usize) % page_size;
+        let remainder = stackaddr % page_size;
         Some(if remainder == 0 {
-            stackaddr
+            stackptr
         } else {
-            ((stackaddr as usize) + page_size - remainder) as *mut libc::c_void
+            stackptr.with_addr(stackaddr + page_size - remainder)
         })
     }
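A self-contained version of the alignment fix above (hypothetical helper, not from the commit), assuming `page_size` is nonzero, as asserted in the function above:

    fn page_align_up(p: *mut u8, page_size: usize) -> *mut u8 {
        let addr = p.addr();
        let remainder = addr % page_size;
        if remainder == 0 {
            p
        } else {
            // with_addr keeps the provenance of `p` while replacing its address,
            // unlike `(addr + page_size - remainder) as *mut u8`.
            p.with_addr(addr + page_size - remainder)
        }
    }

    fn main() {
        let base: *mut u8 = std::ptr::null_mut();
        assert_eq!(page_align_up(base.with_addr(5000), 4096).addr(), 8192);
    }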
@@ -588,8 +589,8 @@ pub mod guard {
             // Instead, we'll just note where we expect rlimit to start
             // faulting, so our handler can report "stack overflow", and
             // trust that the kernel's own stack guard will work.
-            let stackaddr = get_stack_start_aligned()?;
-            let stackaddr = stackaddr as usize;
+            let stackptr = get_stack_start_aligned()?;
+            let stackaddr = stackptr.addr();
             Some(stackaddr - page_size..stackaddr)
         } else if cfg!(all(target_os = "linux", target_env = "musl")) {
             // For the main thread, the musl's pthread_attr_getstack
@@ -602,8 +603,8 @@ pub mod guard {
             // at the bottom. If we try to remap the bottom of the stack
             // ourselves, FreeBSD's guard page moves upwards. So we'll just use
             // the builtin guard page.
-            let stackaddr = get_stack_start_aligned()?;
-            let guardaddr = stackaddr as usize;
+            let stackptr = get_stack_start_aligned()?;
+            let guardaddr = stackptr.addr();
             // Technically the number of guard pages is tunable and controlled
             // by the security.bsd.stack_guard_page sysctl, but there are
             // few reasons to change it from the default. The default value has
@@ -620,25 +621,25 @@ pub mod guard {
             // than the initial mmap() used, so we mmap() here with
             // read/write permissions and only then mprotect() it to
             // no permissions at all. See issue #50313.
-            let stackaddr = get_stack_start_aligned()?;
+            let stackptr = get_stack_start_aligned()?;
             let result = mmap(
-                stackaddr,
+                stackptr,
                 page_size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                 -1,
                 0,
             );
-            if result != stackaddr || result == MAP_FAILED {
+            if result != stackptr || result == MAP_FAILED {
                 panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
             }
 
-            let result = mprotect(stackaddr, page_size, PROT_NONE);
+            let result = mprotect(stackptr, page_size, PROT_NONE);
             if result != 0 {
                 panic!("failed to protect the guard page: {}", io::Error::last_os_error());
             }
 
-            let guardaddr = stackaddr as usize;
+            let guardaddr = stackptr.addr();
 
             Some(guardaddr..guardaddr + page_size)
         }
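As a standalone illustration of the mmap-then-mprotect workaround described in the comments above, a hedged sketch assuming the libc crate; it maps a fresh anonymous page instead of remapping a stack address, so it is safe to run:

    use std::io;

    fn main() {
        unsafe {
            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
            // Map the page readable/writable first, then revoke all permissions,
            // mirroring the order used above (see issue #50313).
            let page = libc::mmap(
                std::ptr::null_mut(),
                page_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            );
            assert!(page != libc::MAP_FAILED, "mmap: {}", io::Error::last_os_error());
            assert_eq!(libc::mprotect(page, page_size, libc::PROT_NONE), 0);
            // The pointer itself (with its provenance) is what gets stored; the
            // guard range is page.addr() .. page.addr() + page_size.
            println!("guard page at {:#x}", page.addr());
            libc::munmap(page, page_size);
        }
    }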
@@ -646,7 +647,8 @@ pub mod guard {
     #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
     pub unsafe fn current() -> Option<Guard> {
-        let stackaddr = get_stack_start()? as usize;
+        let stackptr = get_stack_start()?;
+        let stackaddr = stackptr.addr();
         Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
     }
@@ -679,11 +681,11 @@ pub mod guard {
                     panic!("there is no guard page");
                 }
             }
-            let mut stackaddr = crate::ptr::null_mut();
+            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
             let mut size = 0;
-            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut size), 0);
+            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
 
-            let stackaddr = stackaddr as usize;
+            let stackaddr = stackptr.addr();
             ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
                 Some(stackaddr - guardsize..stackaddr)
             } else if cfg!(all(target_os = "linux", target_env = "musl")) {

View File

@@ -22,9 +22,10 @@
 // that, we'll just allow that some unix targets don't use this module at all.
 #![allow(dead_code, unused_macros)]
 
-use crate::ffi::CStr;
+use crate::ffi::{c_void, CStr};
 use crate::marker::PhantomData;
 use crate::mem;
+use crate::ptr;
 use crate::sync::atomic::{self, AtomicUsize, Ordering};
 
 // We can use true weak linkage on ELF targets.
@@ -129,25 +130,25 @@ impl<F> DlsymWeak<F> {
     // Cold because it should only happen during first-time initialization.
     #[cold]
     unsafe fn initialize(&self) -> Option<F> {
-        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
+        assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut ()>());
 
         let val = fetch(self.name);
         // This synchronizes with the acquire fence in `get`.
-        self.addr.store(val, Ordering::Release);
+        self.addr.store(val.addr(), Ordering::Release);
 
-        match val {
+        match val.addr() {
             0 => None,
-            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
+            _ => Some(mem::transmute_copy::<*mut c_void, F>(&val)),
         }
     }
 }
 
-unsafe fn fetch(name: &str) -> usize {
+unsafe fn fetch(name: &str) -> *mut c_void {
     let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
         Ok(cstr) => cstr,
-        Err(..) => return 0,
+        Err(..) => return ptr::null_mut(),
     };
-    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
 }
 
 #[cfg(not(any(target_os = "linux", target_os = "android")))]
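A standalone sketch of the `fetch` + `transmute_copy` pattern above, assuming the libc crate; the symbol looked up (`getpid`) is just an example, not something this module does. The point of the change is that the symbol stays a `*mut c_void` end to end, and only its `addr()` goes into the `AtomicUsize` cache:

    use std::mem;

    type GetPid = unsafe extern "C" fn() -> libc::pid_t;

    // Hypothetical lookup helper with error handling reduced to Option.
    unsafe fn lookup_getpid() -> Option<GetPid> {
        let sym: *mut libc::c_void =
            unsafe { libc::dlsym(libc::RTLD_DEFAULT, "getpid\0".as_ptr().cast()) };
        if sym.is_null() {
            return None;
        }
        // Function pointers are pointer-sized on the targets this module supports,
        // so reinterpreting the object pointer's bits as `GetPid` is sound here.
        assert_eq!(mem::size_of::<GetPid>(), mem::size_of::<*mut libc::c_void>());
        Some(unsafe { mem::transmute_copy::<*mut libc::c_void, GetPid>(&sym) })
    }

    fn main() {
        if let Some(getpid) = unsafe { lookup_getpid() } {
            println!("pid = {}", unsafe { getpid() });
        }
    }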