Merge pull request #179 from rust-lang/sync_from_rust2

Sync from rust2
antoyo 2022-06-06 22:03:09 -04:00 committed by GitHub
commit e8dca3e87d
14 changed files with 154 additions and 67 deletions


@ -12,7 +12,7 @@ A secondary goal is to check if using the gcc backend will provide any run-time
## Building
**This requires a patched libgccjit in order to work.
-The patches in [this repostory](https://github.com/antoyo/libgccjit-patches) need to be applied.
+The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
(Those patches should work when applied on master, but in case it doesn't work, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**


@ -37,7 +37,7 @@ impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
trait Trait {
// This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
-// without unsized_locals), but wrappers arond `Self` currently are not.
+// without unsized_locals), but wrappers around `Self` currently are not.
// FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
// fn wrapper(self: Wrapper<Self>) -> i32;
fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;


@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2022-03-26"
channel = "nightly-2022-06-06"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]


@ -10,7 +10,7 @@ case $1 in
rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
echo nightly-${TOOLCHAIN} > rust-toolchain
echo "=> Uninstalling all old nighlies"
echo "=> Uninstalling all old nightlies"
for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
rustup toolchain uninstall $nightly
done


@ -258,9 +258,14 @@ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_opera
}
InlineAsmOperandRef::SymFn { instance } => {
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O)
+// or byte count suffixes (x86 Windows).
constants_len += self.tcx.symbol_name(instance).name.len();
}
InlineAsmOperandRef::SymStatic { def_id } => {
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O).
constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
}
}
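For context, a minimal sketch of the user-facing feature these arms serve (not code from this PR; assumes x86_64 and a 2022 nightly with the then-unstable `asm_sym` feature): a `sym` operand is lowered to the mangled symbol name, which is why the backend pre-computes the names' lengths into `constants_len`.

```rust
#![feature(asm_sym)]
use std::arch::asm;

static COUNTER: u32 = 0;

unsafe fn counter_addr() -> *const u32 {
    let addr: *const u32;
    // `sym` splices the mangled name of COUNTER into the template string;
    // the template's final size is only known after mangling.
    asm!("lea {0}, [rip + {1}]", out(reg) addr, sym COUNTER);
    addr
}

fn main() {
    println!("{:p}", unsafe { counter_addr() });
}
```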
@ -427,13 +432,16 @@ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_opera
}
InlineAsmOperandRef::SymFn { instance } => {
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O)
+// or byte count suffixes (x86 Windows).
let name = self.tcx.symbol_name(instance).name;
template_str.push_str(name);
}
InlineAsmOperandRef::SymStatic { def_id } => {
-// TODO(@Commeownist): This may not be sufficient for all kinds of statics.
-// Some statics may need the `@plt` suffix, like thread-local vars.
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O).
let instance = Instance::mono(self.tcx, def_id);
let name = self.tcx.symbol_name(instance).name;
template_str.push_str(name);
@ -596,9 +604,10 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
+InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(),
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
InlineAsmRegClass::X86(
-X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
+X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg,
) => unreachable!("clobber-only"),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("GCC backend does not support SPIR-V")
@ -661,6 +670,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => cx.type_i16(),
+InlineAsmRegClass::X86(X86InlineAsmRegClass::tmm_reg) => unimplemented!(),
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
@ -671,8 +682,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
}
}
-impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> {
-fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef], options: InlineAsmOptions, _line_spans: &[Span]) {
+impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
let asm_arch = self.tcx.sess.asm_arch.unwrap();
// Default to Intel syntax on x86
@ -705,6 +716,22 @@ fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[Gl
// here unlike normal inline assembly.
template_str.push_str(string);
}
+GlobalAsmOperandRef::SymFn { instance } => {
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O)
+// or byte count suffixes (x86 Windows).
+let name = self.tcx.symbol_name(instance).name;
+template_str.push_str(name);
+}
+GlobalAsmOperandRef::SymStatic { def_id } => {
+// TODO(@Amanieu): Additional mangling is needed on
+// some targets to add a leading underscore (Mach-O).
+let instance = Instance::mono(self.tcx, def_id);
+let name = self.tcx.symbol_name(instance).name;
+template_str.push_str(name);
+}
}
}
}
@ -775,7 +802,8 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
-InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None,
+InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),


@ -11,7 +11,7 @@
use crate::{GccCodegenBackend, GccContext};
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
-let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
{
let context = &module.module_llvm.context;


@ -62,24 +62,6 @@ enum ExtremumOperation {
Min,
}
-trait EnumClone {
-fn clone(&self) -> Self;
-}
-impl EnumClone for AtomicOrdering {
-fn clone(&self) -> Self {
-match *self {
-AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
-AtomicOrdering::Unordered => AtomicOrdering::Unordered,
-AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
-AtomicOrdering::Acquire => AtomicOrdering::Acquire,
-AtomicOrdering::Release => AtomicOrdering::Release,
-AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
-AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
-}
-}
-}
pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
pub cx: &'a CodegenCx<'gcc, 'tcx>,
pub block: Block<'gcc>,
@ -104,9 +86,9 @@ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, s
match order {
// TODO(antoyo): does this make sense?
AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
-_ => order.clone(),
+_ => order,
};
-let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
+let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
let return_value = func.new_local(None, previous_value.get_type(), "return_value");
self.llbb().add_assignment(None, previous_var, previous_value);
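The deleted `EnumClone` workaround (and the `.clone()` calls above) became unnecessary because the upstream ordering enum is now `Copy`. A standalone sketch of the pattern, with a hypothetical enum standing in for the rustc type:

```rust
// Once an enum derives Copy, values are duplicated by plain assignment,
// so `order.clone()` can be written as just `order`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Ordering {
    Relaxed,
    Acquire,
    Release,
}

fn main() {
    let order = Ordering::Acquire;
    let saved = order; // bitwise copy; `order` stays usable afterwards
    assert_eq!(saved, order);
}
```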
@ -510,8 +492,11 @@ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO(antoyo): convert the arguments to unsigned?
// TODO(antoyo): poison if not exact.
+let a_type = a.get_type().to_unsigned(self);
+let a = self.gcc_int_cast(a, a_type);
+let b_type = b.get_type().to_unsigned(self);
+let b = self.gcc_int_cast(b, b_type);
a / b
}
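A plain-Rust sketch (hypothetical helper, not backend code) of the contract `exactudiv` relies on: the quotient is exact by assumption, so casting both operands to unsigned and dividing loses nothing; the remaining TODO notes that an inexact input should be poison (undefined) rather than merely wrong.

```rust
/// Illustration of the exact-unsigned-division contract.
fn exact_udiv(a: u32, b: u32) -> u32 {
    // Callers guarantee exactness; a real backend may treat a violation
    // as poison/undefined behavior instead of checking it.
    debug_assert!(b != 0 && a % b == 0, "exactudiv requires an exact quotient");
    a / b
}

fn main() {
    assert_eq!(exact_udiv(12, 4), 3);
}
```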
@ -520,7 +505,7 @@ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO(antoyo): posion if not exact.
+// TODO(antoyo): poison if not exact.
// FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
// should be the same.
let typ = a.get_type().to_signed(self);
@ -705,11 +690,11 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'t
}
fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
-let vr = scalar.valid_range.clone();
-match scalar.value {
+let vr = scalar.valid_range(bx);
+match scalar.primitive() {
abi::Int(..) => {
if !scalar.is_always_valid(bx) {
-bx.range_metadata(load, scalar.valid_range);
+bx.range_metadata(load, vr);
}
}
abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
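For context on why `valid_range` matters here: range metadata on loads is what lets the compiler exploit forbidden values (niches). A small stable-Rust demonstration of the effect, independent of this backend:

```rust
use std::num::NonZeroU32;

fn main() {
    // A NonZeroU32 load can never produce 0, so Option reuses that
    // forbidden value to encode None and the wrapper costs no space.
    assert_eq!(
        std::mem::size_of::<Option<NonZeroU32>>(),
        std::mem::size_of::<u32>()
    );
}
```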
@ -735,7 +720,7 @@ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load:
OperandValue::Immediate(self.to_immediate(load, place.layout))
}
else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
-let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+let b_offset = a.size(self).align_to(b.align(self).abi);
let pair_type = place.layout.gcc_type(self, false);
let mut load = |i, scalar: &abi::Scalar, align| {
@ -1275,7 +1260,7 @@ fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
}
fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
-// FIMXE(bjorn3): implement
+// FIXME(bjorn3): implement
}
fn set_span(&mut self, _span: Span) {}
@ -1574,9 +1559,8 @@ fn to_gcc(self) -> i32 {
let ordering =
match self {
-AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Unordered => __ATOMIC_RELAXED,
-AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
AtomicOrdering::Release => __ATOMIC_RELEASE,
AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
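The resulting mapping, sketched standalone with GCC's documented `__ATOMIC_*` constant values; as the TODOs above note, `Unordered` has no exact GCC equivalent and is approximated as relaxed:

```rust
const __ATOMIC_RELAXED: i32 = 0;
const __ATOMIC_ACQUIRE: i32 = 2;
const __ATOMIC_RELEASE: i32 = 3;
const __ATOMIC_ACQ_REL: i32 = 4;
const __ATOMIC_SEQ_CST: i32 = 5;

#[derive(Clone, Copy)]
enum AtomicOrdering { Unordered, Relaxed, Acquire, Release, AcquireRelease, SequentiallyConsistent }

fn to_gcc(order: AtomicOrdering) -> i32 {
    match order {
        AtomicOrdering::Unordered | AtomicOrdering::Relaxed => __ATOMIC_RELAXED,
        AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
        AtomicOrdering::Release => __ATOMIC_RELEASE,
        AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
        AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
    }
}

fn main() {
    assert_eq!(to_gcc(AtomicOrdering::Acquire), __ATOMIC_ACQUIRE);
}
```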


@ -91,6 +91,10 @@ fn const_bool(&self, val: bool) -> RValue<'gcc> {
self.const_uint(self.type_i1(), val as u64)
}
+fn const_i16(&self, i: i16) -> RValue<'gcc> {
+self.const_int(self.type_i16(), i as i64)
+}
fn const_i32(&self, i: i32) -> RValue<'gcc> {
self.const_int(self.type_i32(), i as i64)
}
@ -154,14 +158,14 @@ fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
}
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
-let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
match cv {
Scalar::Int(ScalarInt::ZST) => {
-assert_eq!(0, layout.value.size(self).bytes());
+assert_eq!(0, layout.size(self).bytes());
self.const_undef(self.type_ix(0))
}
Scalar::Int(int) => {
-let data = int.assert_bits(layout.value.size(self));
+let data = int.assert_bits(layout.size(self));
// FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values.
@ -205,7 +209,7 @@ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) ->
let base_addr = self.const_bitcast(base_addr, self.usize_type);
let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
let ptr = self.const_bitcast(base_addr + offset, ptr_type);
-if layout.value != Pointer {
+if layout.primitive() != Pointer {
self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
}
else {
@ -275,6 +279,21 @@ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
else if self.is_u128(cx) {
cx.i128_type
}
+else if self.is_uchar(cx) {
+cx.char_type
+}
+else if self.is_ushort(cx) {
+cx.short_type
+}
+else if self.is_uint(cx) {
+cx.int_type
+}
+else if self.is_ulong(cx) {
+cx.long_type
+}
+else if self.is_ulonglong(cx) {
+cx.longlong_type
+}
else {
self.clone()
}
@ -296,6 +315,21 @@ fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
else if self.is_i128(cx) {
cx.u128_type
}
+else if self.is_char(cx) {
+cx.uchar_type
+}
+else if self.is_short(cx) {
+cx.ushort_type
+}
+else if self.is_int(cx) {
+cx.uint_type
+}
+else if self.is_long(cx) {
+cx.ulong_type
+}
+else if self.is_longlong(cx) {
+cx.ulonglong_type
+}
else {
self.clone()
}
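Why the C types need their own arms instead of reusing the fixed-width ones: libgccjit treats `long` and `long long` (and friends) as distinct types even on targets where their widths coincide. A stable-Rust illustration of the same distinctness at the FFI level (sizes assume a 64-bit Unix target):

```rust
use std::os::raw::{c_long, c_longlong};

fn main() {
    // Same width on x86_64 Linux, yet distinct C types; matching on
    // width alone (i64/u64) would conflate them, so to_signed/to_unsigned
    // handle each C type explicitly.
    assert_eq!(std::mem::size_of::<c_long>(), 8);
    assert_eq!(std::mem::size_of::<c_longlong>(), 8);
}
```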
@ -308,6 +342,11 @@ pub trait TypeReflection<'gcc, 'tcx> {
fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
@ -328,11 +367,11 @@ pub trait TypeReflection<'gcc, 'tcx> {
impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
-self.unqualified() == cx.u8_type
+self.unqualified() == cx.uchar_type
}
fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
-self.unqualified() == cx.u16_type
+self.unqualified() == cx.ushort_type
}
fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
@ -347,6 +386,26 @@ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
self.unqualified() == cx.ulonglong_type
}
+fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+self.unqualified() == cx.char_type
+}
+fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+self.unqualified() == cx.short_type
+}
+fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+self.unqualified() == cx.int_type
+}
+fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+self.unqualified() == cx.long_type
+}
+fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+self.unqualified() == cx.longlong_type
+}
fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
self.unqualified() == cx.i8_type
}


@ -329,7 +329,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
-abi::Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) },
cx.type_i8p(),
));
next_offset = offset + pointer_size;
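`WrappingRange::full(dl.pointer_size)` replaces the literal `{ start: 0, end: !0 }`: every bit pattern of the pointer width is a valid value. A simplified stand-in (hypothetical mini-type, not rustc's actual API, which takes a `Size`):

```rust
// Simplified model of rustc_target::abi::WrappingRange.
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    // The full range over `bits`-wide values: 0 ..= 2^bits - 1.
    fn full(bits: u32) -> Self {
        let end = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
        WrappingRange { start: 0, end }
    }
}

fn main() {
    let range = WrappingRange::full(64);
    assert_eq!((range.start, range.end), (0, u64::MAX as u128));
}
```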


@ -54,10 +54,15 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub u128_type: Type<'gcc>,
pub usize_type: Type<'gcc>,
+pub char_type: Type<'gcc>,
+pub uchar_type: Type<'gcc>,
+pub short_type: Type<'gcc>,
+pub ushort_type: Type<'gcc>,
pub int_type: Type<'gcc>,
pub uint_type: Type<'gcc>,
pub long_type: Type<'gcc>,
pub ulong_type: Type<'gcc>,
+pub longlong_type: Type<'gcc>,
pub ulonglong_type: Type<'gcc>,
pub sizet_type: Type<'gcc>,
@ -111,7 +116,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
/// NOTE: a hack is used because the rustc API is not suitable to libgccjit and as such,
/// `const_undef()` returns struct as pointer so that they can later be assigned a value.
/// As such, this set remembers which of these pointers were returned by this function so that
-/// they can be deferenced later.
+/// they can be dereferenced later.
/// FIXME(antoyo): fix the rustc API to avoid having this hack.
pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
}
@ -146,10 +151,15 @@ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>,
let float_type = context.new_type::<f32>();
let double_type = context.new_type::<f64>();
+let char_type = context.new_c_type(CType::Char);
+let uchar_type = context.new_c_type(CType::UChar);
+let short_type = context.new_c_type(CType::Short);
+let ushort_type = context.new_c_type(CType::UShort);
let int_type = context.new_c_type(CType::Int);
let uint_type = context.new_c_type(CType::UInt);
let long_type = context.new_c_type(CType::Long);
let ulong_type = context.new_c_type(CType::ULong);
+let longlong_type = context.new_c_type(CType::LongLong);
let ulonglong_type = context.new_c_type(CType::ULongLong);
let sizet_type = context.new_c_type(CType::SizeT);
@ -202,10 +212,15 @@ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>,
u32_type,
u64_type,
u128_type,
+char_type,
+uchar_type,
+short_type,
+ushort_type,
int_type,
uint_type,
long_type,
ulong_type,
+longlong_type,
ulonglong_type,
sizet_type,


@ -340,7 +340,7 @@ fn abort(&mut self) {
}
fn assume(&mut self, value: Self::Value) {
-// TODO(antoyo): switch to asumme when it exists.
+// TODO(antoyo): switch to assume when it exists.
// Or use something like this:
// #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
self.expect(value, true);
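A Rust analogue of the C macro quoted in the comment: making the false branch unreachable is what allows the optimizer to assume the condition afterwards, which `expect(value, true)` only hints at.

```rust
/// Hypothetical helper mirroring `#define __assume(cond) ...` above.
#[inline(always)]
unsafe fn assume(cond: bool) {
    if !cond {
        // UB if ever reached, so the optimizer may assume `cond` holds.
        std::hint::unreachable_unchecked();
    }
}

fn main() {
    let x = 42u32;
    unsafe { assume(x != 0) };
    // Code after this point may be optimized under the assumption x != 0.
    println!("{}", 100 / x);
}
```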


@ -18,6 +18,7 @@
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
// macros for error handling:
+#[allow(unused_macro_rules)]
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
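`unused_macro_rules` is a lint (new on this nightly) that fires on macro arms that are never expanded; the error-reporting macros here deliberately keep spare arms, hence the `#[allow]`. A minimal reproduction with a hypothetical macro:

```rust
#[allow(unused_macro_rules)]
macro_rules! emit_error {
    ($msg:tt) => { eprintln!($msg) };
    // Never expanded below; without the allow, the lint would warn here.
    ($msg:tt, $extra:expr) => { eprintln!(concat!($msg, ": {}"), $extra) };
}

fn main() {
    emit_error!("simd intrinsic rejected");
}
```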


@ -139,14 +139,12 @@ fn target_features(&self, sess: &Session) -> Vec<Symbol> {
}
impl ExtraBackendMethods for GccCodegenBackend {
-fn new_metadata<'tcx>(&self, _tcx: TyCtxt<'tcx>, _mod_name: &str) -> Self::Module {
-GccContext {
+fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) -> Self::Module {
+let mut mods = GccContext {
context: Context::default(),
-}
-}
-fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
-unsafe { allocator::codegen(tcx, mods, module_name, kind, has_alloc_error_handler) }
+};
+unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); }
+mods
}
fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
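The trait change folds module creation into `codegen_allocator`: instead of mutating a module the caller made via `new_metadata`, the backend now builds, fills, and returns the allocator module itself. A shape-only sketch with hypothetical types:

```rust
struct Module {
    name: String,
}

// Before: fn codegen_allocator(&self, mods: &mut Module, ...)
// After:  the backend owns creation and returns the module by value.
fn codegen_allocator(module_name: &str) -> Module {
    let mut module = Module { name: module_name.to_string() };
    fill_allocator_shim(&mut module); // stand-in for allocator::codegen
    module
}

fn fill_allocator_shim(_module: &mut Module) {
    // the real code emits __rust_alloc & co. into the module here
}

fn main() {
    assert_eq!(codegen_allocator("allocator_shim").name, "allocator_shim");
}
```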
@ -213,7 +211,7 @@ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>
unimplemented!();
}
};
-Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
+Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] })
}
fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
@ -229,7 +227,12 @@ unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module
Ok(())
}
-unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: &mut ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> {
+// TODO(antoyo)
+Ok(())
+}
+unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
unimplemented!();
}
@ -245,11 +248,6 @@ fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::Modu
unimplemented!();
}
-fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
-// TODO(antoyo)
-Ok(())
-}
fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::write::link(cgcx, diag_handler, modules)
}
@ -287,8 +285,10 @@ fn handle_native(name: &str) -> &str {
}
pub fn target_cpu(sess: &Session) -> &str {
-let name = sess.opts.cg.target_cpu.as_ref().unwrap_or(&sess.target.cpu);
-handle_native(name)
+match sess.opts.cg.target_cpu {
+Some(ref name) => handle_native(name),
+None => handle_native(sess.target.cpu.as_ref()),
+}
}
pub fn target_features(sess: &Session) -> Vec<Symbol> {
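The rewritten `target_cpu` keeps the same logic while dropping the borrow through `unwrap_or`: prefer an explicit `-C target-cpu`, otherwise fall back to the target's default, resolving the `"native"` pseudo-CPU either way. A standalone sketch with hypothetical signatures:

```rust
fn handle_native(name: &str) -> &str {
    // The real function queries the host CPU; this stand-in passes
    // "native" through unchanged.
    name
}

fn target_cpu<'a>(user_cpu: Option<&'a str>, target_default: &'a str) -> &'a str {
    match user_cpu {
        Some(name) => handle_native(name),
        None => handle_native(target_default),
    }
}

fn main() {
    assert_eq!(target_cpu(Some("skylake"), "generic"), "skylake");
    assert_eq!(target_cpu(None, "generic"), "generic");
}
```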


@ -248,7 +248,7 @@ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
}
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
-match scalar.value {
+match scalar.primitive() {
Int(i, true) => cx.type_from_integer(i),
Int(i, false) => cx.type_from_unsigned_integer(i),
F32 => cx.type_f32(),
@ -306,7 +306,7 @@ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index:
Size::ZERO
}
else {
-a.value.size(cx).align_to(b.value.align(cx).abi)
+a.size(cx).align_to(b.align(cx).abi)
};
self.scalar_gcc_type_at(cx, scalar, offset)
}
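The offset computation is the usual align-up formula: the second half of a scalar pair starts at the first half's size rounded up to the second's ABI alignment. A worked standalone example:

```rust
// Round `size` up to a multiple of `align` (a power of two), mirroring
// `a.size(cx).align_to(b.align(cx).abi)`.
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A pair like (u8, u32): the u32 starts at offset 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // A pair like (u32, u8): no padding needed before the u8.
    assert_eq!(align_to(4, 1), 4);
}
```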