Use real atomic instructions instead of a global lock

bjorn3 2021-02-09 14:36:25 +01:00
parent 92f765fce9
commit f2f5452089
8 changed files with 131 additions and 413 deletions

View File

@ -26,7 +26,7 @@ export RUSTC=$dir"/bin/cg_clif"
export RUSTDOCFLAGS=$linker' -Cpanic=abort -Zpanic-abort-tests '\
'-Zcodegen-backend='$dir'/lib/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$dir
-# FIXME remove once the atomic shim is gone
+# FIXME fix `#[linkage = "extern_weak"]` without this
if [[ "$unamestr" == 'Darwin' ]]; then
export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi
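
For reference, the attribute named in the replacement FIXME is the nightly `linkage` feature; `-Clink-arg=-undefined -Clink-arg=dynamic_lookup` lets the Mach-O linker leave such symbols unresolved until load time. A minimal sketch of the attribute's use (hypothetical symbol name, nightly only):

#![feature(linkage)]

extern "C" {
    // Resolves to a null pointer instead of causing a link error
    // when the symbol is absent at link time.
    #[linkage = "extern_weak"]
    static __hypothetical_weak_symbol: *const u8;
}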

View File

@ -1,185 +0,0 @@
-//! Atomic intrinsics are implemented using a global lock for now, as Cranelift doesn't support
-//! atomic operations yet.
-// FIXME implement atomic instructions in Cranelift.
-use crate::prelude::*;
-#[cfg(all(feature = "jit", unix))]
-#[no_mangle]
-static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
-pub(crate) fn init_global_lock(
-module: &mut impl Module,
-bcx: &mut FunctionBuilder<'_>,
-use_jit: bool,
-) {
-if use_jit {
-// When using JIT, dylibs won't find the __cg_clif_global_atomic_mutex data object defined here,
-// so instead we define it in the cg_clif dylib.
-return;
-}
-let mut data_ctx = DataContext::new();
-data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
-data_ctx.set_align(16);
-let atomic_mutex = module
-.declare_data(
-"__cg_clif_global_atomic_mutex",
-Linkage::Export,
-true,
-false,
-)
-.unwrap();
-module.define_data(atomic_mutex, &data_ctx).unwrap();
-let pthread_mutex_init = module
-.declare_function(
-"pthread_mutex_init",
-Linkage::Import,
-&cranelift_codegen::ir::Signature {
-call_conv: module.target_config().default_call_conv,
-params: vec![
-AbiParam::new(
-module.target_config().pointer_type(), /* *mut pthread_mutex_t */
-),
-AbiParam::new(
-module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
-),
-],
-returns: vec![AbiParam::new(types::I32 /* c_int */)],
-},
-)
-.unwrap();
-let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
-let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
-let atomic_mutex = bcx
-.ins()
-.global_value(module.target_config().pointer_type(), atomic_mutex);
-let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
-bcx.ins().call(pthread_mutex_init, &[atomic_mutex, nullptr]);
-}
-pub(crate) fn init_global_lock_constructor(
-module: &mut impl Module,
-constructor_name: &str,
-) -> FuncId {
-let sig = Signature::new(CallConv::SystemV);
-let init_func_id = module
-.declare_function(constructor_name, Linkage::Export, &sig)
-.unwrap();
-let mut ctx = Context::new();
-ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
-{
-let mut func_ctx = FunctionBuilderContext::new();
-let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
-let block = bcx.create_block();
-bcx.switch_to_block(block);
-crate::atomic_shim::init_global_lock(module, &mut bcx, false);
-bcx.ins().return_(&[]);
-bcx.seal_all_blocks();
-bcx.finalize();
-}
-module
-.define_function(
-init_func_id,
-&mut ctx,
-&mut cranelift_codegen::binemit::NullTrapSink {},
-)
-.unwrap();
-init_func_id
-}
-pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
-let atomic_mutex = fx
-.cx
-.module
-.declare_data(
-"__cg_clif_global_atomic_mutex",
-Linkage::Import,
-true,
-false,
-)
-.unwrap();
-let pthread_mutex_lock = fx
-.cx
-.module
-.declare_function(
-"pthread_mutex_lock",
-Linkage::Import,
-&cranelift_codegen::ir::Signature {
-call_conv: fx.cx.module.target_config().default_call_conv,
-params: vec![AbiParam::new(
-fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
-)],
-returns: vec![AbiParam::new(types::I32 /* c_int */)],
-},
-)
-.unwrap();
-let pthread_mutex_lock = fx
-.cx
-.module
-.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
-let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
-let atomic_mutex = fx
-.bcx
-.ins()
-.global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
-fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
-}
-pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
-let atomic_mutex = fx
-.cx
-.module
-.declare_data(
-"__cg_clif_global_atomic_mutex",
-Linkage::Import,
-true,
-false,
-)
-.unwrap();
-let pthread_mutex_unlock = fx
-.cx
-.module
-.declare_function(
-"pthread_mutex_unlock",
-Linkage::Import,
-&cranelift_codegen::ir::Signature {
-call_conv: fx.cx.module.target_config().default_call_conv,
-params: vec![AbiParam::new(
-fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
-)],
-returns: vec![AbiParam::new(types::I32 /* c_int */)],
-},
-)
-.unwrap();
-let pthread_mutex_unlock = fx
-.cx
-.module
-.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
-let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
-let atomic_mutex = fx
-.bcx
-.ins()
-.global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
-fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
-}
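
For context: under this shim every atomic intrinsic lowered to plain loads and stores bracketed by pthread_mutex_lock/pthread_mutex_unlock calls on the exported mutex. An illustrative Rust model of what, say, atomic_xadd amounted to (a sketch of the semantics, not the actual generated code; GLOBAL_ATOMIC_MUTEX stands in for __cg_clif_global_atomic_mutex):

static mut GLOBAL_ATOMIC_MUTEX: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;

unsafe fn atomic_xadd_model(ptr: *mut u64, amount: u64) -> u64 {
    libc::pthread_mutex_lock(&mut GLOBAL_ATOMIC_MUTEX);
    let old = *ptr;                  // plain, non-atomic load
    *ptr = old.wrapping_add(amount); // plain, non-atomic store
    libc::pthread_mutex_unlock(&mut GLOBAL_ATOMIC_MUTEX);
    old                              // the intrinsic returns the previous value
}

Every atomic on every address therefore contended on one process-wide lock, and code using real atomic instructions on the same memory would not synchronize with it.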

View File

@ -8,7 +8,7 @@
use cranelift_module::FuncId;
use object::write::*;
-use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
+use object::{RelocationEncoding, SectionKind, SymbolFlags};
use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
@ -118,49 +118,6 @@ fn add_debug_reloc(
}
}
-// FIXME remove once atomic instructions are implemented in Cranelift.
-pub(crate) trait AddConstructor {
-fn add_constructor(&mut self, func_id: FuncId);
-}
-impl AddConstructor for ObjectProduct {
-fn add_constructor(&mut self, func_id: FuncId) {
-let symbol = self.function_symbol(func_id);
-let segment = self
-.object
-.segment_name(object::write::StandardSegment::Data);
-let init_array_section =
-self.object
-.add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
-let address_size = self
-.object
-.architecture()
-.address_size()
-.expect("address_size must be known")
-.bytes();
-self.object.append_section_data(
-init_array_section,
-&std::iter::repeat(0)
-.take(address_size.into())
-.collect::<Vec<u8>>(),
-8,
-);
-self.object
-.add_relocation(
-init_array_section,
-object::write::Relocation {
-offset: 0,
-size: address_size * 8,
-kind: RelocationKind::Absolute,
-encoding: RelocationEncoding::Generic,
-symbol,
-addend: 0,
-},
-)
-.unwrap();
-}
-}
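
Background: .init_array is the ELF section of function pointers the loader invokes before main, which is how the removed add_constructor ran the mutex initializer in proc-macro dylibs that never execute a main shim. The same effect, expressed directly in Rust (minimal sketch for ELF targets; names are illustrative):

extern "C" fn init_atomics_mutex() {
    // initialization that must run at load time
}

// Place a function pointer in .init_array so the loader calls it before main.
#[used]
#[link_section = ".init_array"]
static INIT: extern "C" fn() = init_atomics_mutex;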
pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
let triple = crate::build_isa(sess).triple().clone();

View File

@ -12,12 +12,10 @@
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{DebugInfo, OutputType};
-use cranelift_object::{ObjectModule, ObjectProduct};
+use cranelift_object::ObjectModule;
use crate::{prelude::*, BackendConfig};
-use crate::backend::AddConstructor;
fn new_module(tcx: TyCtxt<'_>, name: String) -> ObjectModule {
let module = crate::backend::make_module(tcx.sess, name);
assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
@ -39,7 +37,6 @@ fn emit_module(
module: ObjectModule,
debug: Option<DebugContext<'_>>,
unwind_context: UnwindContext<'_>,
-map_product: impl FnOnce(ObjectProduct) -> ObjectProduct,
) -> ModuleCodegenResult {
let mut product = module.finish();
@ -49,8 +46,6 @@ fn emit_module(
unwind_context.emit(&mut product);
-let product = map_product(product);
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&name));
@ -124,30 +119,7 @@ fn module_codegen(
let cgu = tcx.codegen_unit(cgu_name);
let mono_items = cgu.items_in_deterministic_order(tcx);
-let mut module = new_module(tcx, cgu_name.as_str().to_string());
-// Initialize the global atomic mutex using a constructor for proc-macros.
-// FIXME implement atomic instructions in Cranelift.
-let mut init_atomics_mutex_from_constructor = None;
-if tcx
-.sess
-.crate_types()
-.contains(&rustc_session::config::CrateType::ProcMacro)
-{
-if mono_items.iter().any(|(mono_item, _)| match mono_item {
-rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
-.symbol_name(Instance::mono(tcx, *def_id))
-.name
-.contains("__rustc_proc_macro_decls_"),
-_ => false,
-}) {
-init_atomics_mutex_from_constructor =
-Some(crate::atomic_shim::init_global_lock_constructor(
-&mut module,
-&format!("{}_init_atomics_mutex", cgu_name.as_str()),
-));
-}
-}
+let module = new_module(tcx, cgu_name.as_str().to_string());
let mut cx = crate::CodegenCx::new(
tcx,
@ -180,7 +152,7 @@ fn module_codegen(
}
let (mut module, global_asm, debug, mut unwind_context) =
tcx.sess.time("finalize CodegenCx", || cx.finalize());
-crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);
+crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
let codegen_result = emit_module(
tcx,
@ -189,13 +161,6 @@ fn module_codegen(
module,
debug,
unwind_context,
-|mut product| {
-if let Some(func_id) = init_atomics_mutex_from_constructor {
-product.add_constructor(func_id);
-}
-product
-},
);
codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
@ -275,7 +240,6 @@ pub(super) fn run_aot(
allocator_module,
None,
allocator_unwind_context,
-|product| product,
);
if let Some((id, product)) = work_product {
work_products.insert(id, product);

View File

@ -23,24 +23,6 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
}
-#[cfg(unix)]
-unsafe {
-// When not using our custom driver rustc will open us without the RTLD_GLOBAL flag, so
-// __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourself again
-// as global.
-// FIXME remove once atomic_shim is gone
-let mut dl_info: libc::Dl_info = std::mem::zeroed();
-assert_ne!(
-libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
-0
-);
-assert_ne!(
-libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
-std::ptr::null_mut(),
-);
-}
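
Background on the removed block: a dylib loaded without RTLD_GLOBAL keeps its exports out of the global symbol namespace, so dylibs dlopen'd later could not bind to __cg_clif_global_atomic_mutex; re-opening the backend with RTLD_GLOBAL promoted its symbols. With real atomics there is no shared mutex symbol, so the trick is unnecessary. A sketch of how such visibility could be checked (assumes Unix and the libc crate; illustrative only):

use std::ffi::CString;

unsafe fn symbol_is_globally_visible(name: &str) -> bool {
    let name = CString::new(name).unwrap();
    // RTLD_DEFAULT searches only the global namespace, so symbols of
    // libraries opened without RTLD_GLOBAL are not found here.
    !libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()).is_null()
}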
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut jit_builder = JITBuilder::with_isa(
@ -111,7 +93,7 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
tcx.sess.fatal("Inline asm is not supported in JIT mode");
}
-crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
+crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context);
crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
tcx.sess.abort_if_errors();

View File

@ -9,6 +9,7 @@
pub(crate) use llvm::codegen_llvm_intrinsic_call;
use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
use rustc_middle::ty::print::with_no_trimmed_paths;
macro intrinsic_pat {
@ -112,38 +113,6 @@
}
}
-macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
-crate::atomic_shim::lock_global_lock($fx);
-let clif_ty = $fx.clif_type($T).unwrap();
-let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-let new = $fx.bcx.ins().$op(old, $src);
-$fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-$ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
-crate::atomic_shim::unlock_global_lock($fx);
-}
-macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
-crate::atomic_shim::lock_global_lock($fx);
-// Read old
-let clif_ty = $fx.clif_type($T).unwrap();
-let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-// Compare
-let is_eq = $fx.bcx.ins().icmp($cc, old, $src);
-let new = $fx.bcx.ins().select(is_eq, old, $src);
-// Write new
-$fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-let ret_val = CValue::by_val(old, $ret.layout());
-$ret.write_cvalue($fx, ret_val);
-crate::atomic_shim::unlock_global_lock($fx);
-}
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
match $ty.kind() {
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
@ -912,136 +881,175 @@ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
};
_ if intrinsic.starts_with("atomic_fence"), () {
-crate::atomic_shim::lock_global_lock(fx);
-crate::atomic_shim::unlock_global_lock(fx);
+fx.bcx.ins().fence();
};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {
-crate::atomic_shim::lock_global_lock(fx);
-crate::atomic_shim::unlock_global_lock(fx);
+// FIXME use a compiler fence once Cranelift supports it
+fx.bcx.ins().fence();
};
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
crate::atomic_shim::lock_global_lock(fx);
_ if intrinsic.starts_with("atomic_load"), <T> (v ptr) {
validate_atomic_type!(fx, intrinsic, span, T);
let ty = fx.clif_type(T).unwrap();
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
let val = CValue::by_val(val, fx.layout_of(T));
ret.write_cvalue(fx, val);
crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
-crate::atomic_shim::lock_global_lock(fx);
+let val = val.load_scalar(fx);
-let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
-dest.write_cvalue(fx, val);
-crate::atomic_shim::unlock_global_lock(fx);
+fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
};
_ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, T);
_ if intrinsic.starts_with("atomic_xchg"), (v ptr, c new) {
let layout = new.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
crate::atomic_shim::lock_global_lock(fx);
let new = new.load_scalar(fx);
// Read old
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
// Write new
let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
dest.write_cvalue(fx, src);
crate::atomic_shim::unlock_global_lock(fx);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
validate_atomic_type!(fx, intrinsic, span, T);
_ if intrinsic.starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
let layout = new.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let test_old = test_old.load_scalar(fx);
let new = new.load_scalar(fx);
crate::atomic_shim::lock_global_lock(fx);
// Read old
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
// Compare
let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
// Write new
fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
ret.write_cvalue(fx, ret_val);
crate::atomic_shim::unlock_global_lock(fx);
ret.write_cvalue(fx, ret_val)
};
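
The atomic_cas lowering above returns the value actually read, and the success flag is recomputed by comparing it against test_old, matching the (old, success) pair Rust's compare_exchange family expects. An illustrative model of those semantics (not the generated code):

fn cxchg_model(slot: &mut u64, test_old: u64, new: u64) -> (u64, bool) {
    let old = *slot;       // atomic_cas yields the prior value
    if old == test_old {
        *slot = new;       // the store happens only on a match
    }
    (old, old == test_old) // success flag, materialized as i8 via `bint` above
}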
_ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_xadd"), (v ptr, c amount) {
let layout = amount.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let amount = amount.load_scalar(fx);
atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_xsub"), (v ptr, c amount) {
let layout = amount.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let amount = amount.load_scalar(fx);
atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
let src = src.load_scalar(fx);
atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, T);
_ if intrinsic.starts_with("atomic_and"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
crate::atomic_shim::lock_global_lock(fx);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
let and = fx.bcx.ins().band(old, src);
let new = fx.bcx.ins().bnot(and);
fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_or"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
let src = src.load_scalar(fx);
atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_xor"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
// FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
_ if intrinsic.starts_with("atomic_nand"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_max"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_umax"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
_ if intrinsic.starts_with("atomic_min"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
_ if intrinsic.starts_with("atomic_umin"), (v ptr, c src) {
let layout = src.layout();
validate_atomic_type!(fx, intrinsic, span, layout.ty);
let ty = fx.clif_type(layout.ty).unwrap();
let src = src.load_scalar(fx);
let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
let old = CValue::by_val(old, layout);
ret.write_cvalue(fx, old);
};
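
All of the read-modify-write arms above now share one shape: derive the Cranelift type from the operand layout, load the scalar, and emit a single atomic_rmw with the matching AtomicRmwOp. A hypothetical helper distilling that pattern (a sketch; not part of the commit):

fn emit_atomic_rmw<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
    op: AtomicRmwOp,
    ptr: Value,
    src: CValue<'tcx>,
) -> CValue<'tcx> {
    let layout = src.layout();
    let ty = fx.clif_type(layout.ty).unwrap();
    let src = src.load_scalar(fx);
    // One sequentially consistent instruction replaces the old
    // lock/load/modify/store/unlock sequence.
    let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), op, ptr, src);
    CValue::by_val(old, layout)
}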
minnumf32, (v a, v b) {

View File

@ -12,8 +12,6 @@
#![warn(unused_lifetimes)]
#![warn(unreachable_pub)]
#[cfg(feature = "jit")]
extern crate libc;
extern crate snap;
#[macro_use]
extern crate rustc_middle;
@ -54,7 +52,6 @@
mod allocator;
mod analyze;
mod archive;
-mod atomic_shim;
mod backend;
mod base;
mod cast;

View File

@ -9,7 +9,6 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx: TyCtxt<'_>,
module: &mut impl Module,
unwind_context: &mut UnwindContext<'_>,
-use_jit: bool,
) {
let (main_def_id, use_start_lang_item) = match tcx.entry_fn(LOCAL_CRATE) {
Some((def_id, entry_ty)) => (
@ -33,7 +32,6 @@ pub(crate) fn maybe_create_entry_wrapper(
unwind_context,
main_def_id,
use_start_lang_item,
-use_jit,
);
fn create_entry_fn(
@ -42,7 +40,6 @@ fn create_entry_fn(
unwind_context: &mut UnwindContext<'_>,
rust_main_def_id: DefId,
use_start_lang_item: bool,
-use_jit: bool,
) {
let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
@ -86,8 +83,6 @@ fn create_entry_fn(
let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
-crate::atomic_shim::init_global_lock(m, &mut bcx, use_jit);
let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
let call_inst = if use_start_lang_item {