// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use option::*;
use super::stack::StackSegment;
use libc::c_void;
use uint;
use cast::{transmute, transmute_mut_unsafe,
           transmute_region, transmute_mut_region};

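// The recorded stack limit is kept this many bytes above the true bottom of
// the stack; see `record_stack_bounds` below for why 20k was chosen.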
pub static RED_ZONE: uint = 20 * 1024;

// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs.  It would be marginally better not to do this. In C++ we
// use an attribute on a struct.
// FIXME #7761: It would be nice to define regs as `~Option<Registers>` since
// the registers are sometimes empty, but the discriminant would
// then misalign the regs again.
pub struct Context {
    /// The context entry point, saved here for later destruction
    priv start: Option<~~fn()>,
    /// Hold the registers while the task or scheduler is suspended
    priv regs: ~Registers,
    /// Lower bound and upper bound for the stack
    priv stack_bounds: Option<(uint, uint)>,
}

impl Context {
    pub fn empty() -> Context {
        Context {
            start: None,
            regs: new_regs(),
            stack_bounds: None,
        }
    }

    /// Create a new context that will resume execution by running ~fn()
    pub fn new(start: ~fn(), stack: &mut StackSegment) -> Context {
        // FIXME #7767: Putting main into a ~ so it's a thin pointer and can
        //              be passed to the spawn function. Another unfortunate
        //              allocation
        let start = ~start;

        // The C-ABI function that is the task entry point
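        // It receives a pointer to the boxed closure (`argp`, set up below)
        // as its only argument and simply invokes it.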
        extern fn task_start_wrapper(f: &~fn()) { (*f)() }

        let fp: *c_void = task_start_wrapper as *c_void;
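        // Note: `argp` points at the heap cell created by `~start` above; that
        // allocation is kept alive by storing `start` in the returned Context,
        // so the pointer stays valid until the wrapper runs.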
        let argp: *c_void = unsafe { transmute::<&~fn(), *c_void>(&*start) };
        let sp: *uint = stack.end();
        let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) };
        // Save and then immediately load the current context,
        // which we will then modify to call the given function when restored
        let mut regs = new_regs();
        unsafe {
            swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs));
        };
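
        // Rewrite the saved registers so that restoring this context "returns"
        // into task_start_wrapper (fp) with argp as its argument and sp as the
        // stack pointer.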
        initialize_call_frame(&mut *regs, fp, argp, sp);

        // Scheduler tasks don't have a stack in the "we allocated it" sense,
        // but rather they run on pthreads stacks. We have complete control over
        // them in terms of the code running on them (and hopefully they don't
        // overflow). Additionally, their coroutine stacks are listed as being
        // zero-length, so that's how we detect what's what here.
        let stack_base: *uint = stack.start();
        let bounds = if sp as uint == stack_base as uint {
            None
        } else {
            Some((stack_base as uint, sp as uint))
        };
        return Context {
            start: Some(start),
            regs: regs,
            stack_bounds: bounds,
        }
    }

    /* Switch contexts

    Suspend the current execution context and resume another by
    saving the register values of the executing thread to a Context
    then loading the registers from a previously saved Context.
    */
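    // For illustration only (a hypothetical scheduler-side sketch, not code
    // from this module):
    //
    //     let mut saved = Context::empty();
    //     Context::swap(&mut saved, &task_context); // jump into the task
    //     // ...later the task swaps back through the same pair, reversed.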
    pub fn swap(out_context: &mut Context, in_context: &Context) {
        rtdebug!("swapping contexts");
        let out_regs: &mut Registers = match out_context {
            &Context { regs: ~ref mut r, _ } => r
        };
        let in_regs: &Registers = match in_context {
            &Context { regs: ~ref r, _ } => r
        };

        rtdebug!("noting the stack limit and doing raw swap");

        unsafe {
            // Right before we switch to the new context, set the new context's
            // stack limit in the OS-specified TLS slot. This also means that
            // we cannot call any more rust functions after record_stack_bounds
            // returns because they would all likely fail due to the limit being
            // invalid for the current task. Lucky for us `swap_registers` is a
            // C function so we don't have to worry about that!
            match in_context.stack_bounds {
                Some((lo, hi)) => record_stack_bounds(lo, hi),
                // If we're going back to one of the original contexts or
                // something that's possibly not a "normal task", then reset
                // the stack limit to 0 to make morestack never fail
                None => record_stack_bounds(0, uint::max_value),
            }
            swap_registers(out_regs, in_regs)
        }
    }
}

extern {
    fn swap_registers(out_regs: *mut Registers, in_regs: *Registers);
}

// Register contexts used in various architectures
//
// These structures all represent a context of one task throughout its
// execution. Each struct is a representation of the architecture's register
// set. When swapping between tasks, these register sets are used to save off
// the current registers into one struct, and load them all from another.
//
// Note that this is only used for context switching, which means that some of
// the registers may go unused. For example, for architectures with
// callee/caller saved registers, the context will only reflect the callee-saved
// registers. This is because the caller-saved registers are already stored
// elsewhere on the stack (if it was necessary anyway).
//
// Additionally, there may be fields on various architectures which are unused
// entirely because they only reflect what is theoretically possible for a
// "complete register set" to show, but user-space cannot alter these registers.
// An example of this would be the segment selectors for x86.
//
// These structures/functions are roughly in-sync with the source files inside
// of src/rt/arch/$arch. The only currently used function from those folders is
// the `swap_registers` function, but that's only because for now segmented
// stacks are disabled.

#[cfg(target_arch = "x86")]
struct Registers {
    eax: u32, ebx: u32, ecx: u32, edx: u32,
    ebp: u32, esi: u32, edi: u32, esp: u32,
    cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16,
    eflags: u32, eip: u32
}

#[cfg(target_arch = "x86")]
fn new_regs() -> ~Registers {
    ~Registers {
        eax: 0, ebx: 0, ecx: 0, edx: 0,
        ebp: 0, esi: 0, edi: 0, esp: 0,
        cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0,
        eflags: 0, eip: 0
    }
}

#[cfg(target_arch = "x86")]
fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
                         sp: *mut uint) {

    let sp = align_down(sp);
    let sp = mut_offset(sp, -4);

    unsafe { *sp = arg as uint };
    let sp = mut_offset(sp, -1);
    unsafe { *sp = 0 }; // The final return address
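
    // At this point the stack looks like a freshly `call`ed cdecl frame:
    // [esp] holds the fake return address (0) and [esp + 4] holds `arg`,
    // which is what `fptr` will see as its first argument on entry.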
    regs.esp = sp as u32;
    regs.eip = fptr as u32;

    // Last base pointer on the stack is 0
    regs.ebp = 0;
}

// windows requires saving more registers (both general and XMM), so the windows
// register context must be larger.
#[cfg(windows, target_arch = "x86_64")]
type Registers = [uint, ..34];
#[cfg(not(windows), target_arch = "x86_64")]
type Registers = [uint, ..22];

#[cfg(windows, target_arch = "x86_64")]
fn new_regs() -> ~Registers { ~([0, .. 34]) }
#[cfg(not(windows), target_arch = "x86_64")]
fn new_regs() -> ~Registers { ~([0, .. 22]) }

#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
                         sp: *mut uint) {

    // Redefinitions from rt/arch/x86_64/regs.h
    static RUSTRT_ARG0: uint = 3;
    static RUSTRT_RSP: uint = 1;
    static RUSTRT_IP: uint = 8;
    static RUSTRT_RBP: uint = 2;

    let sp = align_down(sp);
    let sp = mut_offset(sp, -1);

    // The final return address. 0 indicates the bottom of the stack
    unsafe { *sp = 0; }

    rtdebug!("creating call frame");
    rtdebug!("fptr {}", fptr);
    rtdebug!("arg {}", arg);
    rtdebug!("sp {}", sp);
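
    // Unlike the x86 version above, the first argument is passed in a register
    // on x86_64 (both the SysV and Win64 conventions do this), so `arg` goes
    // into the RUSTRT_ARG0 slot rather than onto the stack.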
    regs[RUSTRT_ARG0] = arg as uint;
    regs[RUSTRT_RSP] = sp as uint;
    regs[RUSTRT_IP] = fptr as uint;

    // Last base pointer on the stack should be 0
    regs[RUSTRT_RBP] = 0;
}

#[cfg(target_arch = "arm")]
type Registers = [uint, ..32];

#[cfg(target_arch = "arm")]
fn new_regs() -> ~Registers { ~([0, .. 32]) }

#[cfg(target_arch = "arm")]
fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
                         sp: *mut uint) {
    let sp = align_down(sp);
    // sp of arm eabi is 8-byte aligned
    let sp = mut_offset(sp, -2);

    // The final return address. 0 indicates the bottom of the stack
    unsafe { *sp = 0; }

    regs[0] = arg as uint;   // r0
    regs[13] = sp as uint;   // #53 sp, r13
    regs[14] = fptr as uint; // #60 pc, r15 --> lr
}

#[cfg(target_arch = "mips")]
type Registers = [uint, ..32];

#[cfg(target_arch = "mips")]
fn new_regs() -> ~Registers { ~([0, .. 32]) }

#[cfg(target_arch = "mips")]
fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
                         sp: *mut uint) {
    let sp = align_down(sp);
    // sp of mips o32 is 8-byte aligned
    let sp = mut_offset(sp, -2);

    // The final return address. 0 indicates the bottom of the stack
    unsafe { *sp = 0; }

    regs[4] = arg as uint;
    regs[29] = sp as uint;
    regs[25] = fptr as uint;
    regs[31] = fptr as uint;
}

fn align_down(sp: *mut uint) -> *mut uint {
    unsafe {
        let sp: uint = transmute(sp);
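        // Clear the low four bits to round the address down to a 16-byte
        // boundary (e.g. 0x1007 becomes 0x1000).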
        let sp = sp & !(16 - 1);
        transmute::<uint, *mut uint>(sp)
    }
}

// ptr::mut_offset is positive ints only
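// (e.g. mut_offset(sp, -1) steps the pointer back by one `uint`: 4 bytes on
// 32-bit targets, 8 on 64-bit).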
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
    use mem::size_of;
    (ptr as int + count * (size_of::<T>() as int)) as *mut T
}

#[inline(always)]
pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
    // When the old runtime had segmented stacks, it used a calculation that was
    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
    // symbol resolution, llvm function calls, etc. In theory this red zone
    // value is 0, but it matters far less when we have gigantic stacks because
    // we don't need to be so exact about our stack budget. The "fudge factor"
    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
    // size. Again though, we have giant stacks, so we round all these
    // calculations up to the nice round number of 20k.
    record_sp_limit(stack_lo + RED_ZONE);

    return target_record_stack_bounds(stack_lo, stack_hi);

    #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
    #[cfg(windows, target_arch = "x86_64")] #[inline(always)]
    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
        // Windows compiles C functions which may check the stack bounds. This
        // means that if we want to perform valid FFI on windows, then we need
        // to ensure that the stack bounds are what they truly are for this
        // task. More info can be found at:
        //   https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
        //
        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
        asm!("mov $0, %gs:0x08" :: "r"(stack_lo) :: "volatile");
        asm!("mov $0, %gs:0x10" :: "r"(stack_hi) :: "volatile");
    }
}

/// Records the current limit of the stack as specified by `limit`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances: this function
/// is changing the stack limit, so upon returning any further function calls
/// may trigger the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are flagged as
/// "inline(always)" because they're messing around with the stack limits. It
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were actual function calls).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
    return target_record_sp_limit(limit);

    // x86-64
    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
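        // OS X addresses thread-local data through %gs; the $$0x60+90*8
        // constant loaded into %rsi below is the byte offset of the slot that
        // holds the stack limit (90 eight-byte slots past 0x60).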
        asm!("movq $$0x60+90*8, %rsi
              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
    }
    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
    }
    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
        // store this inside of the "arbitrary data slot", but double the size
        // because this is 64 bit instead of 32 bit
        asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
    }
    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
    }

    // x86
    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        asm!("movl $$0x48+90*4, %eax
              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
    }
    #[cfg(target_arch = "x86", target_os = "linux")]
    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
    }
    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
        // store this inside of the "arbitrary data slot"
        asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
    }

    // mips, arm - Some brave soul can port these to inline asm, but it's over
    // my head personally
    #[cfg(target_arch = "mips")]
    #[cfg(target_arch = "arm")] #[inline(always)]
    unsafe fn target_record_sp_limit(limit: uint) {
        return record_sp_limit(limit as *c_void);
        extern {
            fn record_sp_limit(limit: *c_void);
        }
    }
}

/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
    return target_get_sp_limit();

    // x86-64
    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movq $$0x60+90*8, %rsi
              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
        return limit;
    }
    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }

    // x86
    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movl $$0x48+90*4, %eax
              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
        return limit;
    }
    #[cfg(target_arch = "x86", target_os = "linux")]
    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }
    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        let limit;
        asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
        return limit;
    }

    // mips, arm - Some brave soul can port these to inline asm, but it's over
    // my head personally
    #[cfg(target_arch = "mips")]
    #[cfg(target_arch = "arm")] #[inline(always)]
    unsafe fn target_get_sp_limit() -> uint {
        return get_sp_limit() as uint;
        extern {
            fn get_sp_limit() -> *c_void;
        }
    }
}