// rust/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp

#include "LLVMWrapper.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Mangler.h"
#if LLVM_VERSION_GE(16, 0)
#include "llvm/Support/ModRef.h"
#endif
#include "llvm/Object/Archive.h"
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Pass.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Support/Signals.h"
#if LLVM_VERSION_LT(16, 0)
#include "llvm/ADT/Optional.h"
#endif
#include <iostream>
//===----------------------------------------------------------------------===
//
// This file defines alternate interfaces to core functions that are more
// readily callable by Rust's FFI.
//
//===----------------------------------------------------------------------===
using namespace llvm;
using namespace llvm::sys;
using namespace llvm::object;
// LLVMAtomicOrdering is already an enum - don't create another
// one.
static AtomicOrdering fromRust(LLVMAtomicOrdering Ordering) {
switch (Ordering) {
case LLVMAtomicOrderingNotAtomic:
return AtomicOrdering::NotAtomic;
case LLVMAtomicOrderingUnordered:
return AtomicOrdering::Unordered;
case LLVMAtomicOrderingMonotonic:
return AtomicOrdering::Monotonic;
case LLVMAtomicOrderingAcquire:
return AtomicOrdering::Acquire;
case LLVMAtomicOrderingRelease:
return AtomicOrdering::Release;
case LLVMAtomicOrderingAcquireRelease:
return AtomicOrdering::AcquireRelease;
case LLVMAtomicOrderingSequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
report_fatal_error("Invalid LLVMAtomicOrdering value!");
}
static LLVM_THREAD_LOCAL char *LastError;
// Custom error handler for fatal LLVM errors.
//
// Notably it exits the process with code 101, unlike LLVM's default of 1.
static void FatalErrorHandler(void *UserData,
#if LLVM_VERSION_LT(14, 0)
const std::string& Reason,
#else
const char* Reason,
#endif
bool GenCrashDiag) {
// Do the same thing that the default error handler does.
std::cerr << "LLVM ERROR: " << Reason << std::endl;
// Since this error handler exits the process, we have to run any cleanup that
// LLVM would run after handling the error. This might change with an LLVM
// upgrade.
sys::RunInterruptHandlers();
exit(101);
}
extern "C" void LLVMRustInstallFatalErrorHandler() {
install_fatal_error_handler(FatalErrorHandler);
}
extern "C" void LLVMRustDisableSystemDialogsOnCrash() {
sys::DisableSystemDialogsOnCrash();
}
extern "C" char *LLVMRustGetLastError(void) {
char *Ret = LastError;
LastError = nullptr;
return Ret;
}
extern "C" void LLVMRustSetLastError(const char *Err) {
free((void *)LastError);
LastError = strdup(Err);
}
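// Illustrative pattern only (a sketch, not a wrapper that exists in this
// file; `LLVMRustExampleParse` is a hypothetical name): a fallible entry
// point records its failure message for the Rust side, which checks the
// null return and then drains the message with LLVMRustGetLastError().
//
//   extern "C" LLVMModuleRef LLVMRustExampleParse(const char *Data) {
//     if (!Data) {
//       LLVMRustSetLastError("no input provided"); // strdup'd copy is kept
//       return nullptr;                            // Rust sees null, then
//     }                                            // fetches the message
//     // ... actual parsing would go here ...
//     return nullptr;
//   }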
extern "C" LLVMContextRef LLVMRustContextCreate(bool shouldDiscardNames) {
auto ctx = new LLVMContext();
ctx->setDiscardValueNames(shouldDiscardNames);
return wrap(ctx);
}
extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
const char *Triple) {
unwrap(M)->setTargetTriple(Triple::normalize(Triple));
}
extern "C" void LLVMRustPrintPassTimings() {
raw_fd_ostream OS(2, false); // stderr.
TimerGroup::printAll(OS);
}
extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,
size_t NameLen) {
return wrap(unwrap(M)->getNamedValue(StringRef(Name, NameLen)));
}
extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
const char *Name,
size_t NameLen,
LLVMTypeRef FunctionTy) {
return wrap(unwrap(M)
->getOrInsertFunction(StringRef(Name, NameLen),
unwrap<FunctionType>(FunctionTy))
.getCallee()
);
}
extern "C" LLVMValueRef
LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, size_t NameLen, LLVMTypeRef Ty) {
Module *Mod = unwrap(M);
StringRef NameRef(Name, NameLen);
// We don't use Module::getOrInsertGlobal because that returns a Constant*,
// which may either be the real GlobalVariable*, or a constant bitcast of it
// if our type doesn't match the original declaration. We always want the
// GlobalVariable* so we can access linkage, visibility, etc.
GlobalVariable *GV = Mod->getGlobalVariable(NameRef, true);
if (!GV)
GV = new GlobalVariable(*Mod, unwrap(Ty), false,
GlobalValue::ExternalLinkage, nullptr, NameRef);
return wrap(GV);
}
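// Illustrative use (a sketch; "my_global" is a made-up name): because the
// result is known to be the real GlobalVariable*, a caller can adjust
// linkage and visibility directly, which a bitcasted Constant* would not
// allow:
//
//   GlobalVariable *GV = unwrap<GlobalVariable>(
//       LLVMRustGetOrInsertGlobal(M, "my_global", 9, Ty));
//   GV->setLinkage(GlobalValue::InternalLinkage);
//   GV->setVisibility(GlobalValue::HiddenVisibility);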
extern "C" LLVMValueRef
LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty) {
return wrap(new GlobalVariable(*unwrap(M),
unwrap(Ty),
false,
GlobalValue::PrivateLinkage,
nullptr));
}
extern "C" LLVMTypeRef LLVMRustMetadataTypeInContext(LLVMContextRef C) {
return wrap(Type::getMetadataTy(*unwrap(C)));
}
static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
switch (Kind) {
case AlwaysInline:
return Attribute::AlwaysInline;
case ByVal:
return Attribute::ByVal;
case Cold:
return Attribute::Cold;
case InlineHint:
return Attribute::InlineHint;
case MinSize:
return Attribute::MinSize;
case Naked:
return Attribute::Naked;
case NoAlias:
return Attribute::NoAlias;
case NoCapture:
return Attribute::NoCapture;
case NoCfCheck:
return Attribute::NoCfCheck;
case NoInline:
return Attribute::NoInline;
case NonNull:
return Attribute::NonNull;
case NoRedZone:
return Attribute::NoRedZone;
case NoReturn:
return Attribute::NoReturn;
case NoUnwind:
return Attribute::NoUnwind;
case OptimizeForSize:
return Attribute::OptimizeForSize;
case ReadOnly:
return Attribute::ReadOnly;
case SExt:
return Attribute::SExt;
case StructRet:
return Attribute::StructRet;
case UWTable:
return Attribute::UWTable;
case ZExt:
return Attribute::ZExt;
case InReg:
return Attribute::InReg;
case SanitizeThread:
return Attribute::SanitizeThread;
case SanitizeAddress:
return Attribute::SanitizeAddress;
case SanitizeMemory:
return Attribute::SanitizeMemory;
case NonLazyBind:
return Attribute::NonLazyBind;
case OptimizeNone:
return Attribute::OptimizeNone;
case ReturnsTwice:
return Attribute::ReturnsTwice;
case ReadNone:
return Attribute::ReadNone;
case SanitizeHWAddress:
return Attribute::SanitizeHWAddress;
case WillReturn:
return Attribute::WillReturn;
case StackProtectReq:
return Attribute::StackProtectReq;
case StackProtectStrong:
return Attribute::StackProtectStrong;
case StackProtect:
return Attribute::StackProtect;
case NoUndef:
return Attribute::NoUndef;
case SanitizeMemTag:
return Attribute::SanitizeMemTag;
case ShadowCallStack:
return Attribute::ShadowCallStack;
case AllocSize:
return Attribute::AllocSize;
#if LLVM_VERSION_GE(15, 0)
case AllocatedPointer:
return Attribute::AllocatedPointer;
case AllocAlign:
return Attribute::AllocAlign;
#endif
}
report_fatal_error("bad AttributeKind");
}
template<typename T> static inline void AddAttributes(T *t, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
AttributeList PAL = t->getAttributes();
AttributeList PALNew;
#if LLVM_VERSION_LT(14, 0)
AttrBuilder B;
for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
B.addAttribute(unwrap(Attr));
PALNew = PAL.addAttributes(t->getContext(), Index, B);
#else
AttrBuilder B(t->getContext());
for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
B.addAttribute(unwrap(Attr));
PALNew = PAL.addAttributesAtIndex(t->getContext(), Index, B);
#endif
t->setAttributes(PALNew);
}
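// A note on `Index`, relying on LLVM's AttributeList convention (a sketch):
// AttributeList::ReturnIndex (0) addresses the return value,
// AttributeList::FunctionIndex (~0U) the function itself, and parameters
// are 1-based starting at AttributeList::FirstArgIndex. For example, to
// put an attribute on a function's first parameter:
//
//   AddAttributes(F, AttributeList::FirstArgIndex, &Attr, 1);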
extern "C" void LLVMRustAddFunctionAttributes(LLVMValueRef Fn, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
Function *F = unwrap<Function>(Fn);
AddAttributes(F, Index, Attrs, AttrsLen);
}
extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
CallBase *Call = unwrap<CallBase>(Instr);
AddAttributes(Call, Index, Attrs, AttrsLen);
}
extern "C" LLVMAttributeRef LLVMRustCreateAttrNoValue(LLVMContextRef C,
LLVMRustAttribute RustAttr) {
return wrap(Attribute::get(*unwrap(C), fromRust(RustAttr)));
}
extern "C" LLVMAttributeRef LLVMRustCreateAlignmentAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithAlignment(*unwrap(C), llvm::Align(Bytes)));
}
extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithDereferenceableBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C,
uint64_t Bytes) {
return wrap(Attribute::getWithDereferenceableOrNullBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C, LLVMTypeRef Ty) {
return wrap(Attribute::getWithByValType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTypeRef Ty) {
return wrap(Attribute::getWithStructRetType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateElementTypeAttr(LLVMContextRef C, LLVMTypeRef Ty) {
#if LLVM_VERSION_GE(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::ElementType, unwrap(Ty)));
#else
report_fatal_error("Should not be needed on LLVM < 15");
#endif
}
extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
#if LLVM_VERSION_LT(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::UWTable));
#else
return wrap(Attribute::getWithUWTableKind(
*unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
#endif
}
extern "C" LLVMAttributeRef LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32_t ElementSizeArg) {
return wrap(Attribute::getWithAllocSizeArgs(*unwrap(C), ElementSizeArg,
#if LLVM_VERSION_LT(16, 0)
None
#else
std::nullopt
#endif
));
}
#if LLVM_VERSION_GE(15, 0)
// These values **must** match ffi::AllocKindFlags.
// They also happen to match the values of llvm::AllocFnKind, but that is
// coincidental: we convert explicitly before passing them to LLVM.
enum class LLVMRustAllocKindFlags : uint64_t {
Unknown = 0,
Alloc = 1,
Realloc = 1 << 1,
Free = 1 << 2,
Uninitialized = 1 << 3,
Zeroed = 1 << 4,
Aligned = 1 << 5,
};
static LLVMRustAllocKindFlags operator&(LLVMRustAllocKindFlags A, LLVMRustAllocKindFlags B) {
return static_cast<LLVMRustAllocKindFlags>(static_cast<uint64_t>(A) &
static_cast<uint64_t>(B));
}
static bool isSet(LLVMRustAllocKindFlags F) { return F != LLVMRustAllocKindFlags::Unknown; }
static llvm::AllocFnKind allocKindFromRust(LLVMRustAllocKindFlags F) {
llvm::AllocFnKind AFK = llvm::AllocFnKind::Unknown;
if (isSet(F & LLVMRustAllocKindFlags::Alloc)) {
AFK |= llvm::AllocFnKind::Alloc;
}
if (isSet(F & LLVMRustAllocKindFlags::Realloc)) {
AFK |= llvm::AllocFnKind::Realloc;
}
if (isSet(F & LLVMRustAllocKindFlags::Free)) {
AFK |= llvm::AllocFnKind::Free;
}
if (isSet(F & LLVMRustAllocKindFlags::Uninitialized)) {
AFK |= llvm::AllocFnKind::Uninitialized;
}
if (isSet(F & LLVMRustAllocKindFlags::Zeroed)) {
AFK |= llvm::AllocFnKind::Zeroed;
}
if (isSet(F & LLVMRustAllocKindFlags::Aligned)) {
AFK |= llvm::AllocFnKind::Aligned;
}
return AFK;
}
#endif
extern "C" LLVMAttributeRef LLVMRustCreateAllocKindAttr(LLVMContextRef C, uint64_t AllocKindArg) {
#if LLVM_VERSION_GE(15, 0)
return wrap(Attribute::get(*unwrap(C), Attribute::AllocKind,
static_cast<uint64_t>(allocKindFromRust(static_cast<LLVMRustAllocKindFlags>(AllocKindArg)))));
#else
report_fatal_error(
"allockind attributes are new in LLVM 15 and should not be used on older LLVMs");
#endif
}
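// Illustrative call (a sketch; the values are chosen for the example): a
// malloc-like allocator returning uninitialized memory would combine
// Alloc | Uninitialized on the Rust side and pass the raw bits here:
//
//   uint64_t Kind = static_cast<uint64_t>(LLVMRustAllocKindFlags::Alloc)
//                 | static_cast<uint64_t>(LLVMRustAllocKindFlags::Uninitialized);
//   LLVMAttributeRef A = LLVMRustCreateAllocKindAttr(C, Kind);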
// Simplified representation of `MemoryEffects` across the FFI boundary.
//
// Each variant corresponds to one of the static factory methods on `MemoryEffects`.
enum class LLVMRustMemoryEffects {
None,
ReadOnly,
InaccessibleMemOnly,
};
extern "C" LLVMAttributeRef LLVMRustCreateMemoryEffectsAttr(LLVMContextRef C,
LLVMRustMemoryEffects Effects) {
#if LLVM_VERSION_GE(16, 0)
switch (Effects) {
case LLVMRustMemoryEffects::None:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::none()));
case LLVMRustMemoryEffects::ReadOnly:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::readOnly()));
case LLVMRustMemoryEffects::InaccessibleMemOnly:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C),
MemoryEffects::inaccessibleMemOnly()));
default:
report_fatal_error("bad MemoryEffects.");
}
#else
switch (Effects) {
case LLVMRustMemoryEffects::None:
return wrap(Attribute::get(*unwrap(C), Attribute::ReadNone));
case LLVMRustMemoryEffects::ReadOnly:
return wrap(Attribute::get(*unwrap(C), Attribute::ReadOnly));
case LLVMRustMemoryEffects::InaccessibleMemOnly:
return wrap(Attribute::get(*unwrap(C), Attribute::InaccessibleMemOnly));
default:
report_fatal_error("bad MemoryEffects.");
}
#endif
}
// Enable the full set of fast-math flags on an instruction
//
// https://llvm.org/docs/LangRef.html#fast-math-flags
extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
if (auto I = dyn_cast<Instruction>(unwrap<Value>(V))) {
I->setFast(true);
}
}
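// `setFast(true)` enables the whole flag set (reassoc, nnan, ninf, nsz,
// arcp, contract, afn), so an instruction built through this helper prints
// with the combined `fast` keyword, e.g.:
//
//   %sum = fadd fast float %a, %b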
extern "C" LLVMValueRef
LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
const char *Name, LLVMAtomicOrdering Order) {
Value *Ptr = unwrap(Source);
LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
LI->setAtomic(fromRust(Order));
return wrap(LI);
}
extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
LLVMValueRef V,
LLVMValueRef Target,
LLVMAtomicOrdering Order) {
StoreInst *SI = unwrap(B)->CreateStore(unwrap(V), unwrap(Target));
SI->setAtomic(fromRust(Order));
return wrap(SI);
}
enum class LLVMRustAsmDialect {
Att,
Intel,
};
static InlineAsm::AsmDialect fromRust(LLVMRustAsmDialect Dialect) {
switch (Dialect) {
case LLVMRustAsmDialect::Att:
return InlineAsm::AD_ATT;
case LLVMRustAsmDialect::Intel:
return InlineAsm::AD_Intel;
default:
report_fatal_error("bad AsmDialect.");
}
}
extern "C" LLVMValueRef
LLVMRustInlineAsm(LLVMTypeRef Ty, char *AsmString, size_t AsmStringLen,
char *Constraints, size_t ConstraintsLen,
LLVMBool HasSideEffects, LLVMBool IsAlignStack,
LLVMRustAsmDialect Dialect, LLVMBool CanThrow) {
return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
StringRef(AsmString, AsmStringLen),
StringRef(Constraints, ConstraintsLen),
HasSideEffects, IsAlignStack,
fromRust(Dialect), CanThrow));
}
extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints,
size_t ConstraintsLen) {
#if LLVM_VERSION_LT(15, 0)
return InlineAsm::Verify(unwrap<FunctionType>(Ty),
StringRef(Constraints, ConstraintsLen));
#else
// llvm::Error converts to true if it is an error.
return !llvm::errorToBool(InlineAsm::verify(
unwrap<FunctionType>(Ty), StringRef(Constraints, ConstraintsLen)));
#endif
}
extern "C" void LLVMRustAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm,
size_t AsmLen) {
unwrap(M)->appendModuleInlineAsm(StringRef(Asm, AsmLen));
}
typedef DIBuilder *LLVMRustDIBuilderRef;
template <typename DIT> DIT *unwrapDIPtr(LLVMMetadataRef Ref) {
return (DIT *)(Ref ? unwrap<MDNode>(Ref) : nullptr);
}
#define DIDescriptor DIScope
#define DIArray DINodeArray
#define unwrapDI unwrapDIPtr
// These values **must** match debuginfo::DIFlags! They also *happen*
// to match LLVM, but that isn't required as we do giant sets of
// matching below. The value shouldn't be directly passed to LLVM.
enum class LLVMRustDIFlags : uint32_t {
FlagZero = 0,
FlagPrivate = 1,
FlagProtected = 2,
FlagPublic = 3,
FlagFwdDecl = (1 << 2),
FlagAppleBlock = (1 << 3),
FlagBlockByrefStruct = (1 << 4),
FlagVirtual = (1 << 5),
FlagArtificial = (1 << 6),
FlagExplicit = (1 << 7),
FlagPrototyped = (1 << 8),
FlagObjcClassComplete = (1 << 9),
FlagObjectPointer = (1 << 10),
FlagVector = (1 << 11),
FlagStaticMember = (1 << 12),
FlagLValueReference = (1 << 13),
FlagRValueReference = (1 << 14),
FlagExternalTypeRef = (1 << 15),
FlagIntroducedVirtual = (1 << 18),
FlagBitField = (1 << 19),
FlagNoReturn = (1 << 20),
// Do not add values that are not supported by the minimum LLVM
// version we support! see llvm/include/llvm/IR/DebugInfoFlags.def
};
inline LLVMRustDIFlags operator&(LLVMRustDIFlags A, LLVMRustDIFlags B) {
return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(A) &
static_cast<uint32_t>(B));
}
inline LLVMRustDIFlags operator|(LLVMRustDIFlags A, LLVMRustDIFlags B) {
return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(A) |
static_cast<uint32_t>(B));
}
inline LLVMRustDIFlags &operator|=(LLVMRustDIFlags &A, LLVMRustDIFlags B) {
return A = A | B;
}
inline bool isSet(LLVMRustDIFlags F) { return F != LLVMRustDIFlags::FlagZero; }
inline LLVMRustDIFlags visibility(LLVMRustDIFlags F) {
return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(F) & 0x3);
}
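// Example of the encoding (derived from the definitions above): the low
// two bits hold the visibility as a small enum while everything from bit 2
// up is an independent flag, so
//   FlagProtected | FlagFwdDecl == 2 | (1 << 2) == 0x6
// round-trips through visibility() as FlagProtected and through the bit
// tests in fromRust() as FlagFwdDecl.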
static DINode::DIFlags fromRust(LLVMRustDIFlags Flags) {
DINode::DIFlags Result = DINode::DIFlags::FlagZero;
switch (visibility(Flags)) {
case LLVMRustDIFlags::FlagPrivate:
Result |= DINode::DIFlags::FlagPrivate;
break;
case LLVMRustDIFlags::FlagProtected:
Result |= DINode::DIFlags::FlagProtected;
break;
case LLVMRustDIFlags::FlagPublic:
Result |= DINode::DIFlags::FlagPublic;
break;
default:
// The rest are handled below
break;
}
if (isSet(Flags & LLVMRustDIFlags::FlagFwdDecl)) {
Result |= DINode::DIFlags::FlagFwdDecl;
}
if (isSet(Flags & LLVMRustDIFlags::FlagAppleBlock)) {
Result |= DINode::DIFlags::FlagAppleBlock;
}
if (isSet(Flags & LLVMRustDIFlags::FlagVirtual)) {
Result |= DINode::DIFlags::FlagVirtual;
}
if (isSet(Flags & LLVMRustDIFlags::FlagArtificial)) {
Result |= DINode::DIFlags::FlagArtificial;
}
if (isSet(Flags & LLVMRustDIFlags::FlagExplicit)) {
Result |= DINode::DIFlags::FlagExplicit;
}
if (isSet(Flags & LLVMRustDIFlags::FlagPrototyped)) {
Result |= DINode::DIFlags::FlagPrototyped;
}
if (isSet(Flags & LLVMRustDIFlags::FlagObjcClassComplete)) {
Result |= DINode::DIFlags::FlagObjcClassComplete;
}
if (isSet(Flags & LLVMRustDIFlags::FlagObjectPointer)) {
Result |= DINode::DIFlags::FlagObjectPointer;
}
if (isSet(Flags & LLVMRustDIFlags::FlagVector)) {
Result |= DINode::DIFlags::FlagVector;
}
if (isSet(Flags & LLVMRustDIFlags::FlagStaticMember)) {
Result |= DINode::DIFlags::FlagStaticMember;
}
if (isSet(Flags & LLVMRustDIFlags::FlagLValueReference)) {
Result |= DINode::DIFlags::FlagLValueReference;
}
if (isSet(Flags & LLVMRustDIFlags::FlagRValueReference)) {
Result |= DINode::DIFlags::FlagRValueReference;
}
if (isSet(Flags & LLVMRustDIFlags::FlagIntroducedVirtual)) {
Result |= DINode::DIFlags::FlagIntroducedVirtual;
}
if (isSet(Flags & LLVMRustDIFlags::FlagBitField)) {
Result |= DINode::DIFlags::FlagBitField;
}
if (isSet(Flags & LLVMRustDIFlags::FlagNoReturn)) {
Result |= DINode::DIFlags::FlagNoReturn;
}
return Result;
}
// These values **must** match debuginfo::DISPFlags! They also *happen*
// to match LLVM, but that isn't required as we do giant sets of
// matching below. The value shouldn't be directly passed to LLVM.
enum class LLVMRustDISPFlags : uint32_t {
SPFlagZero = 0,
SPFlagVirtual = 1,
SPFlagPureVirtual = 2,
SPFlagLocalToUnit = (1 << 2),
SPFlagDefinition = (1 << 3),
SPFlagOptimized = (1 << 4),
SPFlagMainSubprogram = (1 << 5),
// Do not add values that are not supported by the minimum LLVM
// version we support! see llvm/include/llvm/IR/DebugInfoFlags.def
// (In LLVM < 8, createFunction supported these as separate bool arguments.)
};
inline LLVMRustDISPFlags operator&(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) &
static_cast<uint32_t>(B));
}
inline LLVMRustDISPFlags operator|(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) |
static_cast<uint32_t>(B));
}
inline LLVMRustDISPFlags &operator|=(LLVMRustDISPFlags &A, LLVMRustDISPFlags B) {
return A = A | B;
}
inline bool isSet(LLVMRustDISPFlags F) { return F != LLVMRustDISPFlags::SPFlagZero; }
inline LLVMRustDISPFlags virtuality(LLVMRustDISPFlags F) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(F) & 0x3);
}
static DISubprogram::DISPFlags fromRust(LLVMRustDISPFlags SPFlags) {
DISubprogram::DISPFlags Result = DISubprogram::DISPFlags::SPFlagZero;
switch (virtuality(SPFlags)) {
case LLVMRustDISPFlags::SPFlagVirtual:
Result |= DISubprogram::DISPFlags::SPFlagVirtual;
break;
case LLVMRustDISPFlags::SPFlagPureVirtual:
Result |= DISubprogram::DISPFlags::SPFlagPureVirtual;
break;
default:
// The rest are handled below
break;
}
if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagLocalToUnit)) {
Result |= DISubprogram::DISPFlags::SPFlagLocalToUnit;
}
if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagDefinition)) {
Result |= DISubprogram::DISPFlags::SPFlagDefinition;
}
if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagOptimized)) {
Result |= DISubprogram::DISPFlags::SPFlagOptimized;
}
if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram)) {
Result |= DISubprogram::DISPFlags::SPFlagMainSubprogram;
}
return Result;
}
enum class LLVMRustDebugEmissionKind {
NoDebug,
FullDebug,
LineTablesOnly,
};
static DICompileUnit::DebugEmissionKind fromRust(LLVMRustDebugEmissionKind Kind) {
switch (Kind) {
case LLVMRustDebugEmissionKind::NoDebug:
return DICompileUnit::DebugEmissionKind::NoDebug;
case LLVMRustDebugEmissionKind::FullDebug:
return DICompileUnit::DebugEmissionKind::FullDebug;
case LLVMRustDebugEmissionKind::LineTablesOnly:
return DICompileUnit::DebugEmissionKind::LineTablesOnly;
default:
report_fatal_error("bad DebugEmissionKind.");
}
}
enum class LLVMRustChecksumKind {
None,
MD5,
SHA1,
SHA256,
};
#if LLVM_VERSION_LT(16, 0)
static Optional<DIFile::ChecksumKind> fromRust(LLVMRustChecksumKind Kind) {
#else
static std::optional<DIFile::ChecksumKind> fromRust(LLVMRustChecksumKind Kind) {
#endif
switch (Kind) {
case LLVMRustChecksumKind::None:
#if LLVM_VERSION_LT(16, 0)
return None;
#else
return std::nullopt;
#endif
case LLVMRustChecksumKind::MD5:
return DIFile::ChecksumKind::CSK_MD5;
case LLVMRustChecksumKind::SHA1:
return DIFile::ChecksumKind::CSK_SHA1;
case LLVMRustChecksumKind::SHA256:
return DIFile::ChecksumKind::CSK_SHA256;
default:
report_fatal_error("bad ChecksumKind.");
}
}
extern "C" uint32_t LLVMRustDebugMetadataVersion() {
return DEBUG_METADATA_VERSION;
}
extern "C" uint32_t LLVMRustVersionPatch() { return LLVM_VERSION_PATCH; }
extern "C" uint32_t LLVMRustVersionMinor() { return LLVM_VERSION_MINOR; }
extern "C" uint32_t LLVMRustVersionMajor() { return LLVM_VERSION_MAJOR; }
extern "C" void LLVMRustAddModuleFlag(
LLVMModuleRef M,
Module::ModFlagBehavior MergeBehavior,
const char *Name,
uint32_t Value) {
unwrap(M)->addModuleFlag(MergeBehavior, Name, Value);
}
extern "C" bool LLVMRustHasModuleFlag(LLVMModuleRef M, const char *Name,
size_t Len) {
return unwrap(M)->getModuleFlag(StringRef(Name, Len)) != nullptr;
}
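// Illustrative call sequence (a sketch; the flag name and value are chosen
// for the example):
//
//   LLVMRustAddModuleFlag(M, llvm::Module::Warning, "Dwarf Version", 4);
//   bool Has = LLVMRustHasModuleFlag(M, "Dwarf Version", 13);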
extern "C" LLVMValueRef LLVMRustMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) {
return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD)));
}
extern "C" void LLVMRustGlobalAddMetadata(
LLVMValueRef Global, unsigned Kind, LLVMMetadataRef MD) {
unwrap<GlobalObject>(Global)->addMetadata(Kind, *unwrap<MDNode>(MD));
}
extern "C" LLVMRustDIBuilderRef LLVMRustDIBuilderCreate(LLVMModuleRef M) {
return new DIBuilder(*unwrap(M));
}
extern "C" void LLVMRustDIBuilderDispose(LLVMRustDIBuilderRef Builder) {
delete Builder;
}
extern "C" void LLVMRustDIBuilderFinalize(LLVMRustDIBuilderRef Builder) {
Builder->finalize();
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateCompileUnit(
LLVMRustDIBuilderRef Builder, unsigned Lang, LLVMMetadataRef FileRef,
const char *Producer, size_t ProducerLen, bool isOptimized,
const char *Flags, unsigned RuntimeVer,
const char *SplitName, size_t SplitNameLen,
LLVMRustDebugEmissionKind Kind,
uint64_t DWOId, bool SplitDebugInlining) {
auto *File = unwrapDI<DIFile>(FileRef);
return wrap(Builder->createCompileUnit(Lang, File, StringRef(Producer, ProducerLen),
isOptimized, Flags, RuntimeVer,
StringRef(SplitName, SplitNameLen),
fromRust(Kind), DWOId, SplitDebugInlining));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFile(
LLVMRustDIBuilderRef Builder,
const char *Filename, size_t FilenameLen,
const char *Directory, size_t DirectoryLen, LLVMRustChecksumKind CSKind,
const char *Checksum, size_t ChecksumLen) {
#if LLVM_VERSION_LT(16, 0)
Optional<DIFile::ChecksumKind> llvmCSKind = fromRust(CSKind);
#else
std::optional<DIFile::ChecksumKind> llvmCSKind = fromRust(CSKind);
#endif
#if LLVM_VERSION_LT(16, 0)
Optional<DIFile::ChecksumInfo<StringRef>> CSInfo{};
#else
std::optional<DIFile::ChecksumInfo<StringRef>> CSInfo{};
#endif
if (llvmCSKind)
CSInfo.emplace(*llvmCSKind, StringRef{Checksum, ChecksumLen});
return wrap(Builder->createFile(StringRef(Filename, FilenameLen),
StringRef(Directory, DirectoryLen),
CSInfo));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateSubroutineType(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef ParameterTypes) {
return wrap(Builder->createSubroutineType(
DITypeRefArray(unwrap<MDTuple>(ParameterTypes))));
2013-06-14 13:38:29 -05:00
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFunction(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, unsigned ScopeLine, LLVMRustDIFlags Flags,
LLVMRustDISPFlags SPFlags, LLVMValueRef MaybeFn, LLVMMetadataRef TParam,
LLVMMetadataRef Decl) {
DITemplateParameterArray TParams =
DITemplateParameterArray(unwrap<MDTuple>(TParam));
DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags);
DINode::DIFlags llvmFlags = fromRust(Flags);
DISubprogram *Sub = Builder->createFunction(
unwrapDI<DIScope>(Scope),
StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen),
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty), ScopeLine, llvmFlags,
llvmSPFlags, TParams, unwrapDIPtr<DISubprogram>(Decl));
if (MaybeFn)
unwrap<Function>(MaybeFn)->setSubprogram(Sub);
return wrap(Sub);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateBasicType(
LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
uint64_t SizeInBits, unsigned Encoding) {
return wrap(Builder->createBasicType(StringRef(Name, NameLen), SizeInBits, Encoding));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTypedef(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Type, const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Scope) {
return wrap(Builder->createTypedef(
unwrap<DIType>(Type), StringRef(Name, NameLen), unwrap<DIFile>(File),
LineNo, unwrapDIPtr<DIScope>(Scope)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreatePointerType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef PointeeTy,
uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
const char *Name, size_t NameLen) {
return wrap(Builder->createPointerType(unwrapDI<DIType>(PointeeTy),
SizeInBits, AlignInBits,
AddressSpace,
StringRef(Name, NameLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStructType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef DerivedFrom, LLVMMetadataRef Elements,
unsigned RunTimeLang, LLVMMetadataRef VTableHolder,
const char *UniqueId, size_t UniqueIdLen) {
return wrap(Builder->createStructType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIType>(DerivedFrom),
DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
unwrapDI<DIType>(VTableHolder), StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantPart(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Discriminator,
LLVMMetadataRef Elements, const char *UniqueId, size_t UniqueIdLen) {
return wrap(Builder->createVariantPart(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIDerivedType>(Discriminator),
DINodeArray(unwrapDI<MDTuple>(Elements)), StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateMemberType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef Ty) {
return wrap(Builder->createMemberType(unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
SizeInBits, AlignInBits, OffsetInBits,
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantMemberType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, LLVMValueRef Discriminant,
LLVMRustDIFlags Flags, LLVMMetadataRef Ty) {
llvm::ConstantInt* D = nullptr;
if (Discriminant) {
D = unwrap<llvm::ConstantInt>(Discriminant);
}
return wrap(Builder->createVariantMemberType(unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
SizeInBits, AlignInBits, OffsetInBits, D,
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticMemberType(
LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope,
const char *Name,
size_t NameLen,
LLVMMetadataRef File,
unsigned LineNo,
LLVMMetadataRef Ty,
LLVMRustDIFlags Flags,
LLVMValueRef val,
uint32_t AlignInBits
) {
return wrap(Builder->createStaticMemberType(
unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File),
LineNo,
unwrapDI<DIType>(Ty),
fromRust(Flags),
unwrap<llvm::ConstantInt>(val),
AlignInBits
));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlock(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
LLVMMetadataRef File, unsigned Line, unsigned Col) {
return wrap(Builder->createLexicalBlock(unwrapDI<DIDescriptor>(Scope),
unwrapDI<DIFile>(File), Line, Col));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateLexicalBlockFile(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef File) {
return wrap(Builder->createLexicalBlockFile(unwrapDI<DIDescriptor>(Scope),
unwrapDI<DIFile>(File)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Context,
const char *Name, size_t NameLen,
const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, bool IsLocalToUnit, LLVMValueRef V,
LLVMMetadataRef Decl = nullptr, uint32_t AlignInBits = 0) {
llvm::GlobalVariable *InitVal = cast<llvm::GlobalVariable>(unwrap(V));
llvm::DIExpression *InitExpr = nullptr;
if (llvm::ConstantInt *IntVal = llvm::dyn_cast<llvm::ConstantInt>(InitVal)) {
InitExpr = Builder->createConstantValueExpression(
IntVal->getValue().getSExtValue());
} else if (llvm::ConstantFP *FPVal =
llvm::dyn_cast<llvm::ConstantFP>(InitVal)) {
InitExpr = Builder->createConstantValueExpression(
FPVal->getValueAPF().bitcastToAPInt().getZExtValue());
}
llvm::DIGlobalVariableExpression *VarExpr = Builder->createGlobalVariableExpression(
unwrapDI<DIDescriptor>(Context), StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen),
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), IsLocalToUnit,
/* isDefined */ true,
InitExpr, unwrapDIPtr<MDNode>(Decl),
/* templateParams */ nullptr,
AlignInBits);
InitVal->setMetadata("dbg", VarExpr);
return wrap(VarExpr);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariable(
LLVMRustDIBuilderRef Builder, unsigned Tag, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, bool AlwaysPreserve, LLVMRustDIFlags Flags,
unsigned ArgNo, uint32_t AlignInBits) {
if (Tag == 0x100) { // DW_TAG_auto_variable
return wrap(Builder->createAutoVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags), AlignInBits));
} else {
return wrap(Builder->createParameterVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ArgNo,
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags)));
}
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateArrayType(LLVMRustDIBuilderRef Builder, uint64_t Size,
uint32_t AlignInBits, LLVMMetadataRef Ty,
LLVMMetadataRef Subscripts) {
return wrap(
Builder->createArrayType(Size, AlignInBits, unwrapDI<DIType>(Ty),
DINodeArray(unwrapDI<MDTuple>(Subscripts))));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderGetOrCreateSubrange(LLVMRustDIBuilderRef Builder, int64_t Lo,
int64_t Count) {
return wrap(Builder->getOrCreateSubrange(Lo, Count));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderGetOrCreateArray(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef *Ptr, unsigned Count) {
Metadata **DataValue = unwrap(Ptr);
return wrap(
Builder->getOrCreateArray(ArrayRef<Metadata *>(DataValue, Count)).get());
}
extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
LLVMRustDIBuilderRef Builder, LLVMValueRef V, LLVMMetadataRef VarInfo,
uint64_t *AddrOps, unsigned AddrOpsCount, LLVMMetadataRef DL,
LLVMBasicBlockRef InsertAtEnd) {
return wrap(Builder->insertDeclare(
unwrap(V), unwrap<DILocalVariable>(VarInfo),
Builder->createExpression(llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
DebugLoc(cast<MDNode>(unwrap(DL))),
unwrap(InsertAtEnd)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerator(
LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
const uint64_t Value[2], unsigned SizeInBits, bool IsUnsigned) {
return wrap(Builder->createEnumerator(StringRef(Name, NameLen),
APSInt(APInt(SizeInBits, makeArrayRef(Value, 2)), IsUnsigned)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerationType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMMetadataRef Elements,
LLVMMetadataRef ClassTy, bool IsScoped) {
return wrap(Builder->createEnumerationType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, DINodeArray(unwrapDI<MDTuple>(Elements)),
unwrapDI<DIType>(ClassTy), "", IsScoped));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateUnionType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Elements,
unsigned RunTimeLang, const char *UniqueId, size_t UniqueIdLen) {
return wrap(Builder->createUnionType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIFile>(File),
LineNumber, SizeInBits, AlignInBits, fromRust(Flags),
DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTemplateTypeParameter(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, LLVMMetadataRef Ty) {
bool IsDefault = false; // FIXME: should we ever set this true?
return wrap(Builder->createTemplateTypeParameter(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIType>(Ty), IsDefault));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateNameSpace(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, bool ExportSymbols) {
return wrap(Builder->createNameSpace(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ExportSymbols
));
}
extern "C" void
LLVMRustDICompositeTypeReplaceArrays(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef CompositeTy,
LLVMMetadataRef Elements,
LLVMMetadataRef Params) {
DICompositeType *Tmp = unwrapDI<DICompositeType>(CompositeTy);
Builder->replaceArrays(Tmp, DINodeArray(unwrap<MDTuple>(Elements)),
DINodeArray(unwrap<MDTuple>(Params)));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateDebugLocation(unsigned Line, unsigned Column,
LLVMMetadataRef ScopeRef,
LLVMMetadataRef InlinedAt) {
MDNode *Scope = unwrapDIPtr<MDNode>(ScopeRef);
DILocation *Loc = DILocation::get(
Scope->getContext(), Line, Column, Scope,
unwrapDIPtr<MDNode>(InlinedAt));
return wrap(Loc);
}
extern "C" uint64_t LLVMRustDIBuilderCreateOpDeref() {
return dwarf::DW_OP_deref;
}
extern "C" uint64_t LLVMRustDIBuilderCreateOpPlusUconst() {
return dwarf::DW_OP_plus_uconst;
}
extern "C" int64_t LLVMRustDIBuilderCreateOpLLVMFragment() {
return dwarf::DW_OP_LLVM_fragment;
}
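// These opcodes are composed by the caller into DIExpression operands,
// e.g. (a sketch) an address that must be dereferenced and then offset by
// eight bytes:
//
//   uint64_t AddrOps[] = { LLVMRustDIBuilderCreateOpDeref(),
//                          LLVMRustDIBuilderCreateOpPlusUconst(), 8 };
//   // passed as AddrOps/AddrOpsCount to LLVMRustDIBuilderInsertDeclareAtEnd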
extern "C" void LLVMRustWriteTypeToString(LLVMTypeRef Ty, RustStringRef Str) {
RawRustStringOstream OS(Str);
unwrap<llvm::Type>(Ty)->print(OS);
}
extern "C" void LLVMRustWriteValueToString(LLVMValueRef V,
RustStringRef Str) {
RawRustStringOstream OS(Str);
if (!V) {
OS << "(null)";
} else {
OS << "(";
unwrap<llvm::Value>(V)->getType()->print(OS);
OS << ":";
unwrap<llvm::Value>(V)->print(OS);
OS << ")";
}
}
// The LLVM-C LLVMArrayType function does not support a 64-bit ElementCount.
extern "C" LLVMTypeRef LLVMRustArrayType(LLVMTypeRef ElementTy,
uint64_t ElementCount) {
return wrap(ArrayType::get(unwrap(ElementTy), ElementCount));
}
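// Illustrative case (a sketch): a Rust type such as `[u8; 1 << 32]` needs
// the full 64-bit count, which LLVMArrayType's `unsigned ElementCount`
// parameter cannot represent:
//
//   LLVMTypeRef Big = LLVMRustArrayType(LLVMInt8TypeInContext(C), 1ULL << 32);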
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Twine, LLVMTwineRef)
extern "C" void LLVMRustWriteTwineToString(LLVMTwineRef T, RustStringRef Str) {
RawRustStringOstream OS(Str);
unwrap(T)->print(OS);
}
extern "C" void LLVMRustUnpackOptimizationDiagnostic(
LLVMDiagnosticInfoRef DI, RustStringRef PassNameOut,
LLVMValueRef *FunctionOut, unsigned* Line, unsigned* Column,
RustStringRef FilenameOut, RustStringRef MessageOut) {
// It is undefined behavior to call this on anything other than an
// optimization diagnostic!
llvm::DiagnosticInfoOptimizationBase *Opt =
static_cast<llvm::DiagnosticInfoOptimizationBase *>(unwrap(DI));
RawRustStringOstream PassNameOS(PassNameOut);
PassNameOS << Opt->getPassName();
*FunctionOut = wrap(&Opt->getFunction());
RawRustStringOstream FilenameOS(FilenameOut);
DiagnosticLocation loc = Opt->getLocation();
if (loc.isValid()) {
*Line = loc.getLine();
*Column = loc.getColumn();
FilenameOS << loc.getAbsolutePath();
}
RawRustStringOstream MessageOS(MessageOut);
MessageOS << Opt->getMsg();
}
enum class LLVMRustDiagnosticLevel {
Error,
Warning,
Note,
Remark,
};
extern "C" void
LLVMRustUnpackInlineAsmDiagnostic(LLVMDiagnosticInfoRef DI,
LLVMRustDiagnosticLevel *LevelOut,
unsigned *CookieOut,
LLVMTwineRef *MessageOut) {
// It is undefined behavior to call this on anything other than an inline
// assembly diagnostic!
llvm::DiagnosticInfoInlineAsm *IA =
static_cast<llvm::DiagnosticInfoInlineAsm *>(unwrap(DI));
*CookieOut = IA->getLocCookie();
*MessageOut = wrap(&IA->getMsgStr());
switch (IA->getSeverity()) {
case DS_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case DS_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case DS_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case DS_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
}
}
extern "C" void LLVMRustWriteDiagnosticInfoToString(LLVMDiagnosticInfoRef DI,
RustStringRef Str) {
RawRustStringOstream OS(Str);
DiagnosticPrinterRawOStream DP(OS);
unwrap(DI)->print(DP);
}
enum class LLVMRustDiagnosticKind {
Other,
InlineAsm,
StackSize,
DebugMetadataVersion,
SampleProfile,
OptimizationRemark,
OptimizationRemarkMissed,
OptimizationRemarkAnalysis,
OptimizationRemarkAnalysisFPCommute,
OptimizationRemarkAnalysisAliasing,
OptimizationRemarkOther,
OptimizationFailure,
PGOProfile,
Linker,
Unsupported,
SrcMgr,
};
static LLVMRustDiagnosticKind toRust(DiagnosticKind Kind) {
switch (Kind) {
case DK_InlineAsm:
return LLVMRustDiagnosticKind::InlineAsm;
case DK_StackSize:
return LLVMRustDiagnosticKind::StackSize;
case DK_DebugMetadataVersion:
return LLVMRustDiagnosticKind::DebugMetadataVersion;
case DK_SampleProfile:
return LLVMRustDiagnosticKind::SampleProfile;
case DK_OptimizationRemark:
case DK_MachineOptimizationRemark:
return LLVMRustDiagnosticKind::OptimizationRemark;
case DK_OptimizationRemarkMissed:
case DK_MachineOptimizationRemarkMissed:
return LLVMRustDiagnosticKind::OptimizationRemarkMissed;
case DK_OptimizationRemarkAnalysis:
case DK_MachineOptimizationRemarkAnalysis:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysis;
case DK_OptimizationRemarkAnalysisFPCommute:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisFPCommute;
case DK_OptimizationRemarkAnalysisAliasing:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisAliasing;
case DK_PGOProfile:
return LLVMRustDiagnosticKind::PGOProfile;
case DK_Linker:
return LLVMRustDiagnosticKind::Linker;
case DK_Unsupported:
return LLVMRustDiagnosticKind::Unsupported;
case DK_SrcMgr:
return LLVMRustDiagnosticKind::SrcMgr;
default:
return (Kind >= DK_FirstRemark && Kind <= DK_LastRemark)
? LLVMRustDiagnosticKind::OptimizationRemarkOther
: LLVMRustDiagnosticKind::Other;
}
}
extern "C" LLVMRustDiagnosticKind
LLVMRustGetDiagInfoKind(LLVMDiagnosticInfoRef DI) {
return toRust((DiagnosticKind)unwrap(DI)->getKind());
}
// This is kept distinct from LLVMGetTypeKind, because when
// a new type kind is added, the Rust-side enum must be
// updated or UB will result.
extern "C" LLVMTypeKind LLVMRustGetTypeKind(LLVMTypeRef Ty) {
switch (unwrap(Ty)->getTypeID()) {
case Type::VoidTyID:
return LLVMVoidTypeKind;
case Type::HalfTyID:
return LLVMHalfTypeKind;
case Type::FloatTyID:
return LLVMFloatTypeKind;
case Type::DoubleTyID:
return LLVMDoubleTypeKind;
case Type::X86_FP80TyID:
return LLVMX86_FP80TypeKind;
case Type::FP128TyID:
return LLVMFP128TypeKind;
case Type::PPC_FP128TyID:
return LLVMPPC_FP128TypeKind;
case Type::LabelTyID:
return LLVMLabelTypeKind;
case Type::MetadataTyID:
return LLVMMetadataTypeKind;
case Type::IntegerTyID:
return LLVMIntegerTypeKind;
case Type::FunctionTyID:
return LLVMFunctionTypeKind;
case Type::StructTyID:
return LLVMStructTypeKind;
case Type::ArrayTyID:
return LLVMArrayTypeKind;
case Type::PointerTyID:
return LLVMPointerTypeKind;
case Type::FixedVectorTyID:
return LLVMVectorTypeKind;
case Type::X86_MMXTyID:
return LLVMX86_MMXTypeKind;
case Type::TokenTyID:
return LLVMTokenTypeKind;
case Type::ScalableVectorTyID:
return LLVMScalableVectorTypeKind;
case Type::BFloatTyID:
return LLVMBFloatTypeKind;
case Type::X86_AMXTyID:
return LLVMX86_AMXTypeKind;
#if LLVM_VERSION_GE(15, 0) && LLVM_VERSION_LT(16, 0)
case Type::DXILPointerTyID:
report_fatal_error("Rust does not support DirectX typed pointers.");
break;
#endif
#if LLVM_VERSION_GE(16, 0)
case Type::TypedPointerTyID:
report_fatal_error("Rust does not support typed pointers.");
break;
#endif
}
report_fatal_error("Unhandled TypeID.");
}
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SMDiagnostic, LLVMSMDiagnosticRef)
extern "C" LLVMSMDiagnosticRef LLVMRustGetSMDiagnostic(
LLVMDiagnosticInfoRef DI, unsigned *Cookie) {
llvm::DiagnosticInfoSrcMgr *SM = static_cast<llvm::DiagnosticInfoSrcMgr *>(unwrap(DI));
*Cookie = SM->getLocCookie();
return wrap(&SM->getSMDiag());
}
extern "C" bool LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef,
RustStringRef MessageOut,
RustStringRef BufferOut,
LLVMRustDiagnosticLevel* LevelOut,
unsigned* LocOut,
unsigned* RangesOut,
size_t* NumRanges) {
SMDiagnostic& D = *unwrap(DRef);
RawRustStringOstream MessageOS(MessageOut);
MessageOS << D.getMessage();
switch (D.getKind()) {
case SourceMgr::DK_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case SourceMgr::DK_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case SourceMgr::DK_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case SourceMgr::DK_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
}
if (D.getLoc() == SMLoc())
return false;
const SourceMgr &LSM = *D.getSourceMgr();
const MemoryBuffer *LBuf = LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
LLVMRustStringWriteImpl(BufferOut, LBuf->getBufferStart(), LBuf->getBufferSize());
*LocOut = D.getLoc().getPointer() - LBuf->getBufferStart();
*NumRanges = std::min(*NumRanges, D.getRanges().size());
size_t LineStart = *LocOut - (size_t)D.getColumnNo();
for (size_t i = 0; i < *NumRanges; i++) {
RangesOut[i * 2] = LineStart + D.getRanges()[i].first;
RangesOut[i * 2 + 1] = LineStart + D.getRanges()[i].second;
}
return true;
}
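// A note on the out-parameters above (derived from the code): *NumRanges is
// in/out (capacity in pairs on entry, pairs written on exit), and RangesOut
// is flattened as (start, end) byte offsets into the buffer, i.e.
// RangesOut[2*i] and RangesOut[2*i + 1] bound the i-th range.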
extern "C" LLVMValueRef LLVMRustBuildCleanupPad(LLVMBuilderRef B,
LLVMValueRef ParentPad,
unsigned ArgCount,
LLVMValueRef *LLArgs,
const char *Name) {
Value **Args = unwrap(LLArgs);
if (ParentPad == nullptr) {
Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
ParentPad = wrap(Constant::getNullValue(Ty));
}
return wrap(unwrap(B)->CreateCleanupPad(
unwrap(ParentPad), ArrayRef<Value *>(Args, ArgCount), Name));
}
extern "C" LLVMValueRef LLVMRustBuildCleanupRet(LLVMBuilderRef B,
LLVMValueRef CleanupPad,
LLVMBasicBlockRef UnwindBB) {
CleanupPadInst *Inst = cast<CleanupPadInst>(unwrap(CleanupPad));
return wrap(unwrap(B)->CreateCleanupRet(Inst, unwrap(UnwindBB)));
}
extern "C" LLVMValueRef
LLVMRustBuildCatchPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
unsigned ArgCount, LLVMValueRef *LLArgs, const char *Name) {
Value **Args = unwrap(LLArgs);
return wrap(unwrap(B)->CreateCatchPad(
unwrap(ParentPad), ArrayRef<Value *>(Args, ArgCount), Name));
}
extern "C" LLVMValueRef LLVMRustBuildCatchRet(LLVMBuilderRef B,
LLVMValueRef Pad,
LLVMBasicBlockRef BB) {
return wrap(unwrap(B)->CreateCatchRet(cast<CatchPadInst>(unwrap(Pad)),
unwrap(BB)));
}
extern "C" LLVMValueRef LLVMRustBuildCatchSwitch(LLVMBuilderRef B,
LLVMValueRef ParentPad,
LLVMBasicBlockRef BB,
unsigned NumHandlers,
const char *Name) {
if (ParentPad == nullptr) {
Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
ParentPad = wrap(Constant::getNullValue(Ty));
}
return wrap(unwrap(B)->CreateCatchSwitch(unwrap(ParentPad), unwrap(BB),
NumHandlers, Name));
}
extern "C" void LLVMRustAddHandler(LLVMValueRef CatchSwitchRef,
LLVMBasicBlockRef Handler) {
Value *CatchSwitch = unwrap(CatchSwitchRef);
cast<CatchSwitchInst>(CatchSwitch)->addHandler(unwrap(Handler));
}
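// Composed, the funclet wrappers above produce EH of roughly the shape LLVM
// documents for MSVC-style targets (a sketch, not IR emitted verbatim here):
//
//   dispatch:
//     %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [ ... ]
//     catchret from %cp to label %normal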
extern "C" OperandBundleDef *LLVMRustBuildOperandBundleDef(const char *Name,
LLVMValueRef *Inputs,
unsigned NumInputs) {
return new OperandBundleDef(Name, makeArrayRef(unwrap(Inputs), NumInputs));
}
extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
delete Bundle;
}
extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
OperandBundleDef **OpBundles,
unsigned NumOpBundles) {
Value *Callee = unwrap(Fn);
FunctionType *FTy = unwrap<FunctionType>(Ty);
return wrap(unwrap(B)->CreateCall(
FTy, Callee, makeArrayRef(unwrap(Args), NumArgs),
makeArrayRef(*OpBundles, NumOpBundles)));
}
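// Illustrative sketch (not used by rustc itself): how the operand-bundle
// helpers compose with LLVMRustBuildCall to emit a call carrying a "funclet"
// bundle, e.g. `call void @f() [ "funclet"(token %pad) ]`.
inline LLVMValueRef ExampleCallInFunclet(LLVMBuilderRef B, LLVMTypeRef FnTy,
                                         LLVMValueRef Fn, LLVMValueRef Pad) {
  OperandBundleDef *Funclet = LLVMRustBuildOperandBundleDef("funclet", &Pad, 1);
  LLVMValueRef Call = LLVMRustBuildCall(B, FnTy, Fn, /*Args=*/nullptr,
                                        /*NumArgs=*/0, &Funclet, 1);
  LLVMRustFreeOperandBundleDef(Funclet);
  return Call;
}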
extern "C" LLVMValueRef LLVMRustGetInstrProfIncrementIntrinsic(LLVMModuleRef M) {
return wrap(llvm::Intrinsic::getDeclaration(unwrap(M),
(llvm::Intrinsic::ID)llvm::Intrinsic::instrprof_increment));
}
extern "C" LLVMValueRef LLVMRustBuildMemCpy(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemCpy(
unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
}
extern "C" LLVMValueRef LLVMRustBuildMemMove(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemMove(
unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
}
extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Val,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemSet(
unwrap(Dst), unwrap(Val), unwrap(Size), MaybeAlign(DstAlign), IsVolatile));
}
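// Illustrative sketch (not used by rustc itself): zero-fill Size bytes at Dst
// with a known 8-byte alignment via the wrapper above. A DstAlign of 0 would
// become an empty MaybeAlign, i.e. "alignment unknown".
inline LLVMValueRef ExampleZeroFill(LLVMContextRef Ctx, LLVMBuilderRef B,
                                    LLVMValueRef Dst, LLVMValueRef Size) {
  LLVMValueRef Zero =
      LLVMConstInt(LLVMInt8TypeInContext(Ctx), 0, /*SignExtend=*/false);
  return LLVMRustBuildMemSet(B, Dst, /*DstAlign=*/8, Zero, Size,
                             /*IsVolatile=*/false);
}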
extern "C" LLVMValueRef
LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
OperandBundleDef **OpBundles, unsigned NumOpBundles,
const char *Name) {
Value *Callee = unwrap(Fn);
FunctionType *FTy = unwrap<FunctionType>(Ty);
return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
makeArrayRef(unwrap(Args), NumArgs),
makeArrayRef(*OpBundles, NumOpBundles),
Name));
}
extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B,
LLVMBasicBlockRef BB) {
auto Point = unwrap(BB)->getFirstInsertionPt();
unwrap(B)->SetInsertPoint(unwrap(BB), Point);
}
extern "C" void LLVMRustSetComdat(LLVMModuleRef M, LLVMValueRef V,
const char *Name, size_t NameLen) {
Triple TargetTriple(unwrap(M)->getTargetTriple());
GlobalObject *GV = unwrap<GlobalObject>(V);
if (TargetTriple.supportsCOMDAT()) {
StringRef NameRef(Name, NameLen);
GV->setComdat(unwrap(M)->getOrInsertComdat(NameRef));
}
}
enum class LLVMRustLinkage {
ExternalLinkage = 0,
AvailableExternallyLinkage = 1,
LinkOnceAnyLinkage = 2,
LinkOnceODRLinkage = 3,
WeakAnyLinkage = 4,
WeakODRLinkage = 5,
AppendingLinkage = 6,
InternalLinkage = 7,
PrivateLinkage = 8,
ExternalWeakLinkage = 9,
CommonLinkage = 10,
};
static LLVMRustLinkage toRust(LLVMLinkage Linkage) {
switch (Linkage) {
case LLVMExternalLinkage:
return LLVMRustLinkage::ExternalLinkage;
case LLVMAvailableExternallyLinkage:
return LLVMRustLinkage::AvailableExternallyLinkage;
case LLVMLinkOnceAnyLinkage:
return LLVMRustLinkage::LinkOnceAnyLinkage;
case LLVMLinkOnceODRLinkage:
return LLVMRustLinkage::LinkOnceODRLinkage;
case LLVMWeakAnyLinkage:
return LLVMRustLinkage::WeakAnyLinkage;
case LLVMWeakODRLinkage:
return LLVMRustLinkage::WeakODRLinkage;
case LLVMAppendingLinkage:
return LLVMRustLinkage::AppendingLinkage;
case LLVMInternalLinkage:
return LLVMRustLinkage::InternalLinkage;
case LLVMPrivateLinkage:
return LLVMRustLinkage::PrivateLinkage;
case LLVMExternalWeakLinkage:
return LLVMRustLinkage::ExternalWeakLinkage;
case LLVMCommonLinkage:
return LLVMRustLinkage::CommonLinkage;
default:
report_fatal_error("Invalid LLVMRustLinkage value!");
}
}
static LLVMLinkage fromRust(LLVMRustLinkage Linkage) {
switch (Linkage) {
case LLVMRustLinkage::ExternalLinkage:
return LLVMExternalLinkage;
case LLVMRustLinkage::AvailableExternallyLinkage:
return LLVMAvailableExternallyLinkage;
case LLVMRustLinkage::LinkOnceAnyLinkage:
return LLVMLinkOnceAnyLinkage;
case LLVMRustLinkage::LinkOnceODRLinkage:
return LLVMLinkOnceODRLinkage;
case LLVMRustLinkage::WeakAnyLinkage:
return LLVMWeakAnyLinkage;
case LLVMRustLinkage::WeakODRLinkage:
return LLVMWeakODRLinkage;
case LLVMRustLinkage::AppendingLinkage:
return LLVMAppendingLinkage;
case LLVMRustLinkage::InternalLinkage:
return LLVMInternalLinkage;
case LLVMRustLinkage::PrivateLinkage:
return LLVMPrivateLinkage;
case LLVMRustLinkage::ExternalWeakLinkage:
return LLVMExternalWeakLinkage;
case LLVMRustLinkage::CommonLinkage:
return LLVMCommonLinkage;
}
report_fatal_error("Invalid LLVMRustLinkage value!");
}
extern "C" LLVMRustLinkage LLVMRustGetLinkage(LLVMValueRef V) {
return toRust(LLVMGetLinkage(V));
}
extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
LLVMRustLinkage RustLinkage) {
LLVMSetLinkage(V, fromRust(RustLinkage));
}
extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices,
unsigned NumIndices) {
ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
NumIndices);
Constant *Val = unwrap<Constant>(ConstantVal);
return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
}
extern "C" bool LLVMRustConstIntGetZExtValue(LLVMValueRef CV, uint64_t *value) {
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 64)
return false;
*value = C->getZExtValue();
return true;
}
// Returns true if both high and low were successfully set. Fails if the
// constant is wider than 128 bits.
extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
{
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 128) { return false; }
APInt AP;
#if LLVM_VERSION_GE(15, 0)
if (sext) {
AP = C->getValue().sext(128);
} else {
AP = C->getValue().zext(128);
}
#else
if (sext) {
AP = C->getValue().sextOrSelf(128);
} else {
AP = C->getValue().zextOrSelf(128);
}
#endif
*low = AP.getLoBits(64).getZExtValue();
*high = AP.getHiBits(64).getZExtValue();
return true;
}
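// Illustrative round-trip for the splitting above (hypothetical helper, not
// used by rustc): build an i128 constant from two u64 halves and recover
// them. APInt stores the least-significant word first.
inline bool ExampleInt128RoundTrip(LLVMContextRef Ctx, uint64_t High,
                                   uint64_t Low) {
  uint64_t Words[2] = {Low, High};
  LLVMValueRef C = LLVMConstIntOfArbitraryPrecision(
      LLVMIntTypeInContext(Ctx, 128), /*NumWords=*/2, Words);
  uint64_t OutHigh = 0, OutLow = 0;
  return LLVMRustConstInt128Get(C, /*sext=*/false, &OutHigh, &OutLow) &&
         OutHigh == High && OutLow == Low;
}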
enum class LLVMRustVisibility {
Default = 0,
Hidden = 1,
Protected = 2,
};
static LLVMRustVisibility toRust(LLVMVisibility Vis) {
switch (Vis) {
case LLVMDefaultVisibility:
return LLVMRustVisibility::Default;
case LLVMHiddenVisibility:
return LLVMRustVisibility::Hidden;
case LLVMProtectedVisibility:
return LLVMRustVisibility::Protected;
}
report_fatal_error("Invalid LLVMRustVisibility value!");
}
static LLVMVisibility fromRust(LLVMRustVisibility Vis) {
switch (Vis) {
case LLVMRustVisibility::Default:
return LLVMDefaultVisibility;
case LLVMRustVisibility::Hidden:
return LLVMHiddenVisibility;
case LLVMRustVisibility::Protected:
return LLVMProtectedVisibility;
}
report_fatal_error("Invalid LLVMRustVisibility value!");
}
extern "C" LLVMRustVisibility LLVMRustGetVisibility(LLVMValueRef V) {
return toRust(LLVMGetVisibility(V));
}
// Unlike LLVM's own LLVMBuildIntCast, this binding lets the caller choose the
// signedness of the cast.
extern "C" LLVMValueRef LLVMRustBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
LLVMTypeRef DestTy, bool isSigned) {
return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy), isSigned, ""));
}
extern "C" void LLVMRustSetVisibility(LLVMValueRef V,
LLVMRustVisibility RustVisibility) {
LLVMSetVisibility(V, fromRust(RustVisibility));
}
extern "C" void LLVMRustSetDSOLocal(LLVMValueRef Global, bool is_dso_local) {
unwrap<GlobalValue>(Global)->setDSOLocal(is_dso_local);
}
struct LLVMRustModuleBuffer {
std::string data;
};
extern "C" LLVMRustModuleBuffer*
LLVMRustModuleBufferCreate(LLVMModuleRef M) {
auto Ret = std::make_unique<LLVMRustModuleBuffer>();
{
raw_string_ostream OS(Ret->data);
WriteBitcodeToFile(*unwrap(M), OS);
}
return Ret.release();
}
extern "C" void
LLVMRustModuleBufferFree(LLVMRustModuleBuffer *Buffer) {
delete Buffer;
}
extern "C" const void*
LLVMRustModuleBufferPtr(const LLVMRustModuleBuffer *Buffer) {
return Buffer->data.data();
}
extern "C" size_t
LLVMRustModuleBufferLen(const LLVMRustModuleBuffer *Buffer) {
return Buffer->data.length();
}
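// Illustrative lifecycle (the Rust side drives this via FFI): serialize a
// module to in-memory bitcode, observe its length, then free the buffer.
inline size_t ExampleModuleBufferLifecycle(LLVMModuleRef M) {
  LLVMRustModuleBuffer *Buffer = LLVMRustModuleBufferCreate(M);
  size_t Len = LLVMRustModuleBufferLen(Buffer);
  LLVMRustModuleBufferFree(Buffer);
  return Len;
}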
extern "C" uint64_t
LLVMRustModuleCost(LLVMModuleRef M) {
auto f = unwrap(M)->functions();
return std::distance(std::begin(f), std::end(f));
}
// Vector reductions:
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc),unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMul(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFMulReduce(unwrap(Acc),unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceAdd(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAddReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMul(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateMulReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceAnd(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAndReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceOr(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateOrReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceXor(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateXorReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMin(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
return wrap(unwrap(B)->CreateIntMinReduce(unwrap(Src), IsSigned));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMax(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
return wrap(unwrap(B)->CreateIntMaxReduce(unwrap(Src), IsSigned));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMin(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
Instruction *I = unwrap(B)->CreateFPMinReduce(unwrap(Src));
I->setHasNoNaNs(NoNaN);
return wrap(I);
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMax(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
Instruction *I = unwrap(B)->CreateFPMaxReduce(unwrap(Src));
I->setHasNoNaNs(NoNaN);
return wrap(I);
}
extern "C" LLVMValueRef
LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMinNum(unwrap(LHS),unwrap(RHS)));
}
extern "C" LLVMValueRef
LLVMRustBuildMaxNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMaxNum(unwrap(LHS),unwrap(RHS)));
}
// This struct contains all necessary info about a symbol exported from a DLL.
struct LLVMRustCOFFShortExport {
const char* name;
bool ordinal_present;
// The value of `ordinal` is only meaningful if `ordinal_present` is true.
uint16_t ordinal;
};
// Machine must be a COFF machine type, as defined in the PE specification.
extern "C" LLVMRustResult LLVMRustWriteImportLibrary(
const char* ImportName,
const char* Path,
const LLVMRustCOFFShortExport* Exports,
size_t NumExports,
uint16_t Machine,
bool MinGW)
{
std::vector<llvm::object::COFFShortExport> ConvertedExports;
ConvertedExports.reserve(NumExports);
for (size_t i = 0; i < NumExports; ++i) {
bool ordinal_present = Exports[i].ordinal_present;
uint16_t ordinal = ordinal_present ? Exports[i].ordinal : 0;
ConvertedExports.push_back(llvm::object::COFFShortExport{
Exports[i].name, // Name
std::string{}, // ExtName
std::string{}, // SymbolName
std::string{}, // AliasTarget
ordinal, // Ordinal
ordinal_present, // Noname
false, // Data
false, // Private
false // Constant
});
}
auto Error = llvm::object::writeImportLibrary(
ImportName,
Path,
ConvertedExports,
static_cast<llvm::COFF::MachineTypes>(Machine),
MinGW);
if (Error) {
std::string errorString;
llvm::raw_string_ostream stream(errorString);
stream << Error;
stream.flush();
LLVMRustSetLastError(errorString.c_str());
return LLVMRustResult::Failure;
} else {
return LLVMRustResult::Success;
}
}
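// Illustrative sketch (hypothetical names and paths): write an x86-64 import
// library for a DLL exporting a single symbol with no fixed ordinal.
inline bool ExampleWriteImportLibrary() {
  const LLVMRustCOFFShortExport Exports[] = {
      {"my_function", /*ordinal_present=*/false, /*ordinal=*/0}};
  // 0x8664 is IMAGE_FILE_MACHINE_AMD64 from the PE specification.
  return LLVMRustWriteImportLibrary("example.dll", "example.dll.lib", Exports,
                                    /*NumExports=*/1, /*Machine=*/0x8664,
                                    /*MinGW=*/false) == LLVMRustResult::Success;
}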
// Transfers ownership of DiagnosticHandler unique_ptr to the caller.
extern "C" DiagnosticHandler *
LLVMRustContextGetDiagnosticHandler(LLVMContextRef C) {
std::unique_ptr<DiagnosticHandler> DH = unwrap(C)->getDiagnosticHandler();
return DH.release();
}
// Installs a DiagnosticHandler on the context to provide custom diagnostic
// handling. Ownership of the handler is moved to the LLVMContext.
extern "C" void LLVMRustContextSetDiagnosticHandler(LLVMContextRef C,
DiagnosticHandler *DH) {
unwrap(C)->setDiagnosticHandler(std::unique_ptr<DiagnosticHandler>(DH));
}
using LLVMDiagnosticHandlerTy = DiagnosticHandler::DiagnosticHandlerTy;
// Configures a diagnostic handler that invokes the provided callback when a
// backend needs to emit a diagnostic.
//
// When RemarkAllPasses is true, remarks are enabled for all passes. Otherwise
// the RemarkPasses array specifies individual passes for which remarks will be
// enabled.
extern "C" void LLVMRustContextConfigureDiagnosticHandler(
LLVMContextRef C, LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext, bool RemarkAllPasses,
const char * const * RemarkPasses, size_t RemarkPassesLen) {
class RustDiagnosticHandler final : public DiagnosticHandler {
public:
RustDiagnosticHandler(LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext,
bool RemarkAllPasses,
std::vector<std::string> RemarkPasses)
: DiagnosticHandlerCallback(DiagnosticHandlerCallback),
DiagnosticHandlerContext(DiagnosticHandlerContext),
RemarkAllPasses(RemarkAllPasses),
RemarkPasses(RemarkPasses) {}
virtual bool handleDiagnostics(const DiagnosticInfo &DI) override {
if (DiagnosticHandlerCallback) {
DiagnosticHandlerCallback(DI, DiagnosticHandlerContext);
return true;
}
return false;
}
bool isAnalysisRemarkEnabled(StringRef PassName) const override {
return isRemarkEnabled(PassName);
}
bool isMissedOptRemarkEnabled(StringRef PassName) const override {
return isRemarkEnabled(PassName);
}
bool isPassedOptRemarkEnabled(StringRef PassName) const override {
return isRemarkEnabled(PassName);
}
bool isAnyRemarkEnabled() const override {
return RemarkAllPasses || !RemarkPasses.empty();
}
private:
bool isRemarkEnabled(StringRef PassName) const {
if (RemarkAllPasses)
return true;
for (auto &Pass : RemarkPasses)
if (Pass == PassName)
return true;
return false;
}
LLVMDiagnosticHandlerTy DiagnosticHandlerCallback = nullptr;
void *DiagnosticHandlerContext = nullptr;
bool RemarkAllPasses = false;
std::vector<std::string> RemarkPasses;
};
std::vector<std::string> Passes;
for (size_t I = 0; I != RemarkPassesLen; ++I)
Passes.push_back(RemarkPasses[I]);
unwrap(C)->setDiagnosticHandler(std::make_unique<RustDiagnosticHandler>(
DiagnosticHandlerCallback, DiagnosticHandlerContext, RemarkAllPasses, Passes));
}
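// Illustrative sketch (not used by rustc itself): enable remarks only for the
// "inline" pass, forwarding every diagnostic to Callback with State as its
// opaque context pointer.
inline void ExampleConfigureRemarks(LLVMContextRef C,
                                    LLVMDiagnosticHandlerTy Callback,
                                    void *State) {
  static const char *const Passes[] = {"inline"};
  LLVMRustContextConfigureDiagnosticHandler(C, Callback, State,
                                            /*RemarkAllPasses=*/false, Passes,
                                            /*RemarkPassesLen=*/1);
}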
extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) {
RawRustStringOstream OS(Str);
GlobalValue *GV = unwrap<GlobalValue>(V);
Mangler().getNameWithPrefix(OS, GV, true);
}
// LLVMGetAggregateElement was added in LLVM 15. For earlier LLVM versions we
// replicate its implementation here.
#if LLVM_VERSION_LT(15, 0)
extern "C" LLVMValueRef LLVMGetAggregateElement(LLVMValueRef C, unsigned Idx) {
return wrap(unwrap<Constant>(C)->getAggregateElement(Idx));
}
#endif
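// For intrinsics whose pointer argument must carry an `elementtype` attribute
// on LLVM 15 and later (currently ARM's ldrex and strex), returns the index
// of that argument; returns -1 otherwise.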
extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) {
#if LLVM_VERSION_GE(15, 0)
auto *CB = unwrap<CallBase>(CallSite);
switch (CB->getIntrinsicID()) {
case Intrinsic::arm_ldrex:
return 0;
case Intrinsic::arm_strex:
return 1;
}
#endif
return -1;
}
extern "C" bool LLVMRustIsBitcode(char *ptr, size_t len) {
return identify_magic(StringRef(ptr, len)) == file_magic::bitcode;
}