Remove last vestiges of old-style intrinsics

Closes #2048
This commit is contained in:
Marijn Haverbeke 2012-03-23 15:05:16 +01:00
parent 837478ccb0
commit 1b81c5112a
28 changed files with 37 additions and 981 deletions

View File

@ -317,7 +317,6 @@ HSREQ$(1)_H_$(3) = \
TSREQ$(1)_T_$(2)_H_$(3) = \
$$(HSREQ$(1)_H_$(3)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_RUNTIME) \
$$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.bc \
$$(TLIB$(1)_T_$(2)_H_$(3))/libmorestack.a
# Prerequisites for complete stageN targets

View File

@ -99,8 +99,6 @@ clean$(1)_T_$(2)_H_$(3):
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_GLOB)
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_RUSTLLVM)
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libstd.rlib
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.bc
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.ll
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libmorestack.a
endef

View File

@ -42,7 +42,6 @@ install-target-$(1)-host-$(2): $$(SREQ$$(ISTAGE)_T_$(1)_H_$(2))
$$(TL$(1)$(2)),$$(PTL$(1)$(2)),$$(STDLIB_GLOB))
$$(Q)$$(call INSTALL_LIB, \
$$(TL$(1)$(2)),$$(PTL$(1)$(2)),$$(LIBRUSTC_GLOB))
$$(Q)$$(call INSTALL,$$(TL$(1)$(2)),$$(PTL$(1)$(2)),intrinsics.bc)
$$(Q)$$(call INSTALL,$$(TL$(1)$(2)),$$(PTL$(1)$(2)),libmorestack.a)
endef

View File

@ -13,17 +13,6 @@ USE_SNAPSHOT_CORELIB=0
define TARGET_STAGE_N
$$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.ll: \
$$(S)src/rt/intrinsics/intrinsics.$(HOST_$(2)).ll.in
@$$(call E, sed: $$@)
$$(Q)sed s/@CFG_TARGET_TRIPLE@/$(2)/ $$< > $$@
$$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.bc: \
$$(TLIB$(1)_T_$(2)_H_$(3))/intrinsics.ll \
$$(LLVM_CONFIG_$(2))
@$$(call E, llvms-as: $$@)
$$(Q)$$(LLVM_AS_$(2)) -o $$@ $$<
$$(TLIB$(1)_T_$(2)_H_$(3))/libmorestack.a: \
rt/$(2)/arch/$$(HOST_$(2))/libmorestack.a
@$$(call E, cp: $$@)

View File

@ -1,27 +0,0 @@
#!/bin/sh
# This script generates new definitions for the intrinsics using
# clang. This is not currently in the Makefile to avoid any dependency
# on clang.
#
# For each supported architecture it compiles intrinsics.cpp to LLVM IR,
# then rewrites the emitted module header so the result can serve as a
# target-triple template (.ll.in) substituted at build time.
for ARCH in i386 x86_64
do
# Pick the pointer width matching the architecture.
if [ $ARCH = "i386" ]
then
BITS=32
else
BITS=64
fi
# Emit optimized LLVM IR (-emit-llvm -S) rather than object code;
# -fno-stack-protector keeps the IR free of stack-guard boilerplate.
clang++ -emit-llvm -S -m$BITS -O3 -Isrc/rt/isaac -Isrc/rt/uthash \
-Isrc/rt/arch/$ARCH -Isrc/rt -fno-stack-protector \
-o src/rt/intrinsics/intrinsics.$ARCH.ll.in \
src/rt/intrinsics/intrinsics.cpp
# Comment out the hard-coded datalayout line so the build's own
# target datalayout applies.
# NOTE(review): `sed -i .orig` (space before the suffix) is BSD/macOS
# sed syntax; GNU sed expects `-i.orig` — confirm intended build hosts.
sed -i .orig \
-e 's/^target datalayout =/; target datalayout =/' \
src/rt/intrinsics/intrinsics.$ARCH.ll.in
# Replace the concrete triple with the @CFG_TARGET_TRIPLE@ placeholder
# that the makefiles substitute per target.
sed -i .orig \
-e 's/^target triple = "[^"]*"/target triple = "@CFG_TARGET_TRIPLE@"/' \
src/rt/intrinsics/intrinsics.$ARCH.ll.in
# Drop the backup files the in-place edits left behind.
rm src/rt/intrinsics/intrinsics.$ARCH.ll.in.orig
done

View File

@ -55,7 +55,7 @@ fn rust_port_select(dptr: **rust_port, ports: **rust_port,
yield: *libc::uintptr_t);
}
#[abi = "rust-builtin"]
#[abi = "rust-intrinsic"]
// Compiler-resolved intrinsics (pre-1.0 `native mod` syntax).
native mod rusti {
// NOTE(review): presumably produces a zero/default-initialized T —
// confirm against the compiler's intrinsic translation.
fn init<T>() -> T;
}

View File

@ -19,7 +19,7 @@
fn memmove(dest: *c_void, src: *c_void, n: libc::size_t) -> *c_void;
}
#[abi = "rust-builtin"]
#[abi = "rust-intrinsic"]
// Compiler-resolved intrinsics (pre-1.0 `native mod` syntax).
native mod rusti {
// Per the signature, returns a raw pointer to its argument.
fn addr_of<T>(val: T) -> *T;
}

View File

@ -28,7 +28,7 @@ enum type_desc = {
fn rust_set_exit_status(code: libc::intptr_t);
}
#[abi = "rust-builtin"]
#[abi = "rust-intrinsic"]
native mod rusti {
fn get_tydesc<T>() -> *();
fn size_of<T>() -> uint;

View File

@ -2,7 +2,7 @@
export reinterpret_cast, forget;
#[abi = "rust-builtin"]
#[abi = "rust-intrinsic"]
native mod rusti {
fn forget<T>(-x: T);
fn reinterpret_cast<T, U>(e: T) -> U;

View File

@ -1,174 +0,0 @@
// Rust intrinsics. These are built into each compilation unit and are
// run on the Rust stack. They should not call C methods because that
// will very likely result in running off the end of the stack.
// Build with the script in src/etc/gen-intrinsics
#include "../rust_internal.h"
#include "../rust_util.h"
#include <cstdlib>
#include <cstring>
// Forward declaration; defined elsewhere in the runtime.
extern "C" CDECL void
rust_task_yield(rust_task *task, bool *killed);
// Store the element count of *vp (fill bytes / element size) in *retptr.
extern "C" void
rust_intrinsic_vec_len(size_t *retptr,
void *env,
type_desc *ty,
rust_vec **vp)
{
*retptr = (*vp)->fill / ty->size;
}
// Advance ptr by count elements of size ty->size; result in *retptr.
extern "C" void
rust_intrinsic_ptr_offset(void **retptr,
void *env,
type_desc *ty,
void *ptr,
uintptr_t count)
{
*retptr = &((uint8_t *)ptr)[ty->size * count];
}
// Bitwise reinterpretation: copy t1->size bytes from src to retptr.
extern "C" void
rust_intrinsic_cast(void *retptr,
void *env,
type_desc *t1,
type_desc *t2,
void *src)
{
// assert t1->size == t2->size
// FIXME: This should be easily expressible in rust
memmove(retptr, src, t1->size);
}
// Store the address of valptr itself in *retptr (no copy of the value).
extern "C" void
rust_intrinsic_addr_of(void **retptr,
void *env,
type_desc *ty,
void *valptr) {
*retptr = valptr;
}
// Layout of a Rust closure as seen from C: code pointer + boxed environment.
struct rust_fn {
uintptr_t *fn;
rust_box *env;
};
typedef void (*retptr_fn)(void **retptr,
void *env,
void **dptr);
// FIXME (1185): This exists just to get access to the return pointer
// Invoke recvfn's code pointer with its own environment, passing our
// caller's return pointer through as the data argument.
extern "C" void
rust_intrinsic_call_with_retptr(void **retptr,
void *env,
type_desc *ty,
rust_fn *recvfn) {
retptr_fn fn = ((retptr_fn)(recvfn->fn));
((retptr_fn)(*fn))(NULL, recvfn->env, retptr);
}
// Hand the caller its own type descriptor pointer.
extern "C" void
rust_intrinsic_get_type_desc(void **retptr,
void *env,
type_desc* ty) {
*(type_desc**)retptr = ty;
}
// Thin wrapper forwarding to the runtime's task-yield entry point.
extern "C" void
rust_intrinsic_task_yield(void **retptr,
void *env,
rust_task *task,
bool *killed) {
rust_task_yield(task, killed);
}
// memmove of count elements (count * ty->size bytes); regions may overlap.
extern "C" void
rust_intrinsic_memmove(void *retptr,
void *env,
type_desc *ty,
void *dst,
void *src,
uintptr_t count)
{
memmove(dst, src, ty->size * count);
}
// memcpy of count elements (count * ty->size bytes); regions must not overlap.
extern "C" void
rust_intrinsic_memcpy(void *retptr,
void *env,
type_desc *ty,
void *dst,
void *src,
uintptr_t count)
{
memcpy(dst, src, ty->size * count);
}
// Intentionally empty: takes ownership of `thing` and does nothing,
// so the value is leaked rather than dropped.
extern "C" void
rust_intrinsic_leak(void *retptr,
void *env,
type_desc *ty,
void *thing)
{
}
extern "C" CDECL void *
upcall_shared_realloc(void *ptr, size_t size);
// Grow *vpp's allocation to at least `size` bytes (rounded up to the
// next power of two), reallocating through the exported upcall.
inline void reserve_vec_fast(rust_vec **vpp, size_t size) {
if (size > (*vpp)->alloc) {
size_t new_size = next_power_of_two(size);
size_t alloc_size = new_size + sizeof(rust_vec);
// Because this is called from an intrinsic we need to use
// the exported API
*vpp = (rust_vec*)upcall_shared_realloc(*vpp, alloc_size);
(*vpp)->alloc = new_size;
}
}
// Copy elements from one vector to another,
// dealing with reference counts
static inline void
copy_elements(type_desc *elem_t,
void *pdst, void *psrc, size_t n) {
char *dst = (char *)pdst, *src = (char *)psrc;
memmove(dst, src, n);
// increment the refcount of each element of the vector
if (elem_t->take_glue) {
glue_fn *take_glue = elem_t->take_glue;
size_t elem_size = elem_t->size;
const type_desc **tydescs = elem_t->first_param;
for (char *p = dst; p < dst+n; p += elem_size) {
take_glue(NULL, NULL, tydescs, p);
}
}
}
// Because this is used so often, and it calls take glue that must run
// on the rust stack, it is statically compiled into every crate.
// Append one element to *vp, growing the vector if needed and running
// take glue on the copied element.
extern "C" CDECL void
upcall_intrinsic_vec_push(rust_vec** vp,
type_desc* elt_ty, void* elt) {
size_t new_sz = (*vp)->fill + elt_ty->size;
reserve_vec_fast(vp, new_sz);
rust_vec* v = *vp;
copy_elements(elt_ty, &v->data[0] + v->fill,
elt, elt_ty->size);
v->fill += elt_ty->size;
}
// FIXME: Transitional. Remove
// Legacy-named alias kept for older callers; forwards unchanged.
extern "C" CDECL void
upcall_vec_push(rust_vec** vp,
type_desc* elt_ty, void* elt) {
upcall_intrinsic_vec_push(vp, elt_ty, elt);
}
// Store the frame address n levels up the call stack in *p.
extern "C" CDECL void
rust_intrinsic_frame_address(void **p, unsigned n) {
*p = __builtin_frame_address(n);
}

View File

@ -1,236 +0,0 @@
; ModuleID = 'src/rt/intrinsics/intrinsics.cpp'
; target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
target triple = "@CFG_TARGET_TRIPLE@"
%0 = type { i32, %struct.rust_task**, i32 }
%1 = type { %"struct.hash_map<long, rust_task *>::map_entry"* }
%class.array_list = type { i32, %"struct.memory_region::alloc_header"**, i32 }
%class.boxed_region = type { %class.memory_region*, %struct.rust_opaque_box* }
%class.circular_buffer = type { %class.rust_kernel*, i32, i32, i32, i32, i8* }
%class.context = type { %struct.registers_t, %class.context*, [12 x i8] }
%"class.debug::task_debug_info" = type { %"class.std::map" }
%class.hash_map = type { %"struct.hash_map<long, rust_port *>::map_entry"* }
%class.indexed_list = type { i32 (...)**, %0 }
%class.lock_and_signal = type { i32 (...)**, %struct._opaque_pthread_cond_t, %struct._opaque_pthread_mutex_t, %struct._opaque_pthread_t* }
%class.memory_region = type { i32 (...)**, %class.rust_srv*, %class.memory_region*, i32, %class.array_list, i8, i8, %class.lock_and_signal }
%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_task_thread*, i32 }
%class.rust_kernel = type { %class.memory_region, %class.rust_log, %class.rust_srv*, %class.lock_and_signal, i32, i32, %1, %class.lock_and_signal, i32, %class.lock_and_signal, i32, %"class.std::map", %"class.std::vector", %struct.rust_env* }
%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_task_thread*, i8 }
%class.rust_obstack = type { %struct.rust_obstack_chunk*, %struct.rust_task* }
%class.rust_port = type { i32, i32, %class.rust_kernel*, %struct.rust_task*, i32, %class.circular_buffer, %class.lock_and_signal }
%class.rust_port_selector = type { %class.rust_port**, i32, %class.lock_and_signal }
%class.rust_scheduler = type opaque
%class.rust_srv = type { i32 (...)**, %struct.rust_env*, %class.memory_region }
%class.rust_task_list = type { %class.indexed_list, %struct.rust_task_thread*, i8* }
%class.rust_thread = type { i32 (...)**, %struct._opaque_pthread_t*, i32 }
%"class.std::_Rb_tree" = type { %"struct.std::_Rb_tree<long, std::pair<const long, rust_scheduler *>, std::_Select1st<std::pair<const long, rust_scheduler *> >, std::less<long>, std::allocator<std::pair<const long, rust_scheduler *> > >::_Rb_tree_impl" }
%"class.std::map" = type { %"class.std::_Rb_tree" }
%"class.std::vector" = type { %"struct.std::_Vector_base" }
%struct.UT_hash_bucket = type { %struct.UT_hash_handle*, i32, i32 }
%struct.UT_hash_handle = type { %struct.UT_hash_table*, i8*, i8*, %struct.UT_hash_handle*, %struct.UT_hash_handle*, i8*, i32, i32 }
%struct.UT_hash_table = type { %struct.UT_hash_bucket*, i32, i32, i32, %struct.UT_hash_handle*, i32, i32, i32, i32, i32 }
%struct.__darwin_pthread_handler_rec = type { void (i8*)*, i8*, %struct.__darwin_pthread_handler_rec* }
%struct._opaque_pthread_attr_t = type { i32, [36 x i8] }
%struct._opaque_pthread_cond_t = type { i32, [24 x i8] }
%struct._opaque_pthread_mutex_t = type { i32, [40 x i8] }
%struct._opaque_pthread_t = type { i32, %struct.__darwin_pthread_handler_rec*, [596 x i8] }
%struct.chan_handle = type { i32, i32 }
%"struct.hash_map<long, rust_port *>::map_entry" = type opaque
%"struct.hash_map<long, rust_task *>::map_entry" = type opaque
%"struct.memory_region::alloc_header" = type { i8 }
%struct.randctx = type { i32, [256 x i32], [256 x i32], i32, i32, i32 }
%struct.registers_t = type { i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, i16, i16, i32, i32, [12 x i8] }
%struct.rust_box = type opaque
%struct.rust_env = type { i32, i32, i32, i8*, i8, i8, i8* }
%struct.rust_fn = type { i32*, %struct.rust_box* }
%struct.rust_hashable_dict = type { %struct.UT_hash_handle, [0 x i8*] }
%struct.rust_obstack_chunk = type { %struct.rust_obstack_chunk*, i32, i32, i32, [0 x i8] }
%struct.rust_opaque_box = type { i32, %struct.type_desc*, %struct.rust_opaque_box*, %struct.rust_opaque_box* }
%struct.rust_shape_tables = type { i8*, i8* }
%struct.rust_task = type { i32, i32, i8, %struct.chan_handle, [12 x i8], %class.context, %struct.stk_seg*, i32, %class.rust_scheduler*, %struct.rust_task_thread*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %"struct.memory_region::alloc_header"*, i8*, %struct.rust_task*, i32, i32, i32*, %class.memory_region, %class.boxed_region, i8, i8, %class.lock_and_signal, %class.hash_map, %class.rust_obstack, i32, %"class.debug::task_debug_info", i32, i8, i8, %struct.stk_seg*, i32, i32, %class.rust_port_selector, [8 x i8] }
%struct.rust_task_thread = type { %class.rust_thread, i32, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, %class.rust_scheduler*, i32, i32, %class.lock_and_signal, i32, %struct._opaque_pthread_attr_t, %struct.rust_env*, [4 x i8], %class.context, i8, %struct.stk_seg*, %struct.stk_seg*, [4 x i8] }
%struct.rust_vec = type { i32, i32, [0 x i8] }
%"struct.std::_Rb_tree<long, std::pair<const long, rust_scheduler *>, std::_Select1st<std::pair<const long, rust_scheduler *> >, std::less<long>, std::allocator<std::pair<const long, rust_scheduler *> > >::_Rb_tree_impl" = type { %"struct.memory_region::alloc_header", %"struct.std::_Rb_tree_node_base", i32 }
%"struct.std::_Rb_tree_node_base" = type { i32, %"struct.std::_Rb_tree_node_base"*, %"struct.std::_Rb_tree_node_base"*, %"struct.std::_Rb_tree_node_base"* }
%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<long, std::allocator<long> >::_Vector_impl" }
%"struct.std::_Vector_base<long, std::allocator<long> >::_Vector_impl" = type { i32*, i32*, i32* }
%struct.stk_seg = type { %struct.stk_seg*, %struct.stk_seg*, i32, i32, i32, i32, [0 x i8] }
%struct.type_desc = type { %struct.type_desc**, i32, i32, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, i8*, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, i32, i8*, i8*, %struct.rust_shape_tables*, i32, i32, %struct.UT_hash_handle, i32, [0 x %struct.type_desc*] }
define void @rust_intrinsic_vec_len(i32* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, %struct.rust_vec** nocapture %vp) nounwind {
%1 = load %struct.rust_vec** %vp, align 4
%2 = getelementptr inbounds %struct.rust_vec* %1, i32 0, i32 0
%3 = load i32* %2, align 4
%4 = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%5 = load i32* %4, align 4
%6 = udiv i32 %3, %5
store i32 %6, i32* %retptr, align 4
ret void
}
define void @rust_intrinsic_ptr_offset(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* %ptr, i32 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%2 = load i32* %1, align 4
%3 = mul i32 %2, %count
%4 = getelementptr inbounds i8* %ptr, i32 %3
store i8* %4, i8** %retptr, align 4
ret void
}
define void @rust_intrinsic_cast(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %t1, %struct.type_desc* nocapture %t2, i8* nocapture %src) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %t1, i32 0, i32 1
%2 = load i32* %1, align 4
tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %retptr, i8* %src, i32 %2, i32 1, i1 false)
ret void
}
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
define void @rust_intrinsic_addr_of(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* %valptr) nounwind {
store i8* %valptr, i8** %retptr, align 4
ret void
}
define void @rust_intrinsic_call_with_retptr(i8** %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, %struct.rust_fn* nocapture %recvfn) {
%1 = getelementptr inbounds %struct.rust_fn* %recvfn, i32 0, i32 0
%2 = load i32** %1, align 4
%3 = bitcast i32* %2 to void (i8**, i8*, i8**)*
%4 = getelementptr inbounds %struct.rust_fn* %recvfn, i32 0, i32 1
%5 = load %struct.rust_box** %4, align 4
%6 = bitcast %struct.rust_box* %5 to i8*
tail call void %3(i8** null, i8* %6, i8** %retptr)
ret void
}
define void @rust_intrinsic_get_type_desc(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* %ty) nounwind {
%ty.c = bitcast %struct.type_desc* %ty to i8*
store i8* %ty.c, i8** %retptr, align 4
ret void
}
define void @rust_intrinsic_task_yield(i8** nocapture %retptr, i8* nocapture %env, %struct.rust_task* %task, i8* %killed) {
tail call void @rust_task_yield(%struct.rust_task* %task, i8* %killed)
ret void
}
declare void @rust_task_yield(%struct.rust_task*, i8*)
define void @rust_intrinsic_memmove(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %dst, i8* nocapture %src, i32 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%2 = load i32* %1, align 4
%3 = mul i32 %2, %count
tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %3, i32 1, i1 false)
ret void
}
define void @rust_intrinsic_memcpy(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %dst, i8* nocapture %src, i32 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%2 = load i32* %1, align 4
%3 = mul i32 %2, %count
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %3, i32 1, i1 false)
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
define void @rust_intrinsic_leak(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %thing) nounwind readnone {
ret void
}
define void @upcall_intrinsic_vec_push(%struct.rust_vec** nocapture %vp, %struct.type_desc* nocapture %elt_ty, i8* nocapture %elt) {
; <label>:0
%1 = load %struct.rust_vec** %vp, align 4
%2 = getelementptr inbounds %struct.rust_vec* %1, i32 0, i32 0
%3 = load i32* %2, align 4
%4 = getelementptr inbounds %struct.type_desc* %elt_ty, i32 0, i32 1
%5 = load i32* %4, align 4
%6 = add i32 %5, %3
%7 = getelementptr inbounds %struct.rust_vec* %1, i32 0, i32 1
%8 = load i32* %7, align 4
%9 = icmp ult i32 %8, %6
br i1 %9, label %10, label %_Z16reserve_vec_fastPP8rust_vecm.exit
; <label>:10 ; preds = %0
%11 = add i32 %6, -1
%12 = lshr i32 %11, 1
%13 = or i32 %12, %11
%14 = lshr i32 %13, 2
%15 = or i32 %14, %13
%16 = lshr i32 %15, 4
%17 = or i32 %16, %15
%18 = lshr i32 %17, 8
%19 = or i32 %18, %17
%20 = lshr i32 %19, 16
%21 = or i32 %20, %19
%22 = add i32 %21, 1
%23 = add i32 %21, 9
%24 = bitcast %struct.rust_vec* %1 to i8*
%25 = tail call i8* @upcall_shared_realloc(i8* %24, i32 %23)
%26 = bitcast i8* %25 to %struct.rust_vec*
store %struct.rust_vec* %26, %struct.rust_vec** %vp, align 4
%27 = getelementptr inbounds i8* %25, i32 4
%28 = bitcast i8* %27 to i32*
store i32 %22, i32* %28, align 4
%.pr = load i32* %4, align 4
%.pre = load %struct.rust_vec** %vp, align 4
%.phi.trans.insert = getelementptr inbounds %struct.rust_vec* %.pre, i32 0, i32 0
%.pre4 = load i32* %.phi.trans.insert, align 4
br label %_Z16reserve_vec_fastPP8rust_vecm.exit
_Z16reserve_vec_fastPP8rust_vecm.exit: ; preds = %0, %10
%29 = phi i32 [ %3, %0 ], [ %.pre4, %10 ]
%30 = phi %struct.rust_vec* [ %1, %0 ], [ %.pre, %10 ]
%31 = phi i32 [ %5, %0 ], [ %.pr, %10 ]
%32 = getelementptr inbounds %struct.rust_vec* %30, i32 0, i32 0
%33 = getelementptr inbounds %struct.rust_vec* %30, i32 0, i32 2, i32 %29
tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %33, i8* %elt, i32 %31, i32 1, i1 false)
%34 = getelementptr inbounds %struct.type_desc* %elt_ty, i32 0, i32 3
%35 = load void (i8*, i8*, %struct.type_desc**, i8*)** %34, align 4
%36 = icmp eq void (i8*, i8*, %struct.type_desc**, i8*)* %35, null
br i1 %36, label %_ZL13copy_elementsP9type_descPvS1_m.exit, label %37
; <label>:37 ; preds = %_Z16reserve_vec_fastPP8rust_vecm.exit
%38 = load i32* %4, align 4
%39 = getelementptr inbounds %struct.type_desc* %elt_ty, i32 0, i32 0
%40 = load %struct.type_desc*** %39, align 4
%41 = icmp sgt i32 %31, 0
br i1 %41, label %.lr.ph.i.preheader, label %_ZL13copy_elementsP9type_descPvS1_m.exit
.lr.ph.i.preheader: ; preds = %37
%scevgep = getelementptr %struct.rust_vec* %30, i32 1, i32 0
%scevgep2 = bitcast i32* %scevgep to i8*
br label %.lr.ph.i
.lr.ph.i: ; preds = %.lr.ph.i.preheader, %.lr.ph.i
%indvar.i = phi i32 [ %indvar.next.i, %.lr.ph.i ], [ 0, %.lr.ph.i.preheader ]
%tmp = mul i32 %38, %indvar.i
%tmp2.i = add i32 %38, %tmp
%tmp3 = add i32 %29, %tmp
%p.01.i = getelementptr i8* %scevgep2, i32 %tmp3
tail call void %35(i8* null, i8* null, %struct.type_desc** %40, i8* %p.01.i)
%42 = icmp slt i32 %tmp2.i, %31
%indvar.next.i = add i32 %indvar.i, 1
br i1 %42, label %.lr.ph.i, label %_ZL13copy_elementsP9type_descPvS1_m.exit
_ZL13copy_elementsP9type_descPvS1_m.exit: ; preds = %.lr.ph.i, %_Z16reserve_vec_fastPP8rust_vecm.exit, %37
%43 = load i32* %4, align 4
%44 = load i32* %32, align 4
%45 = add i32 %44, %43
store i32 %45, i32* %32, align 4
ret void
}
define void @upcall_vec_push(%struct.rust_vec** nocapture %vp, %struct.type_desc* nocapture %elt_ty, i8* nocapture %elt) {
tail call void @upcall_intrinsic_vec_push(%struct.rust_vec** %vp, %struct.type_desc* %elt_ty, i8* %elt)
ret void
}
define void @rust_intrinsic_frame_address(i8** nocapture %p) nounwind {
%1 = tail call i8* @llvm.frameaddress(i32 1)
store i8* %1, i8** %p, align 4
ret void
}
declare i8* @llvm.frameaddress(i32) nounwind readnone
declare i8* @upcall_shared_realloc(i8*, i32)

View File

@ -1,149 +0,0 @@
; ModuleID = 'intrinsics.cpp'
target triple = "@CFG_LLVM_TRIPLE@"
%struct.rust_task = type { i32, %struct.stk_seg*, i32, i32, %struct.gc_alloc*, %struct.rust_scheduler*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %struct.rust_cond*, i8*, %struct.rust_task*, i32, i32, i32, %class.timer, i32*, %class.array_list, %class.context, i32, i32, %class.memory_region, %"class.rust_task::wakeup_callback"*, i8, i8, %class.lock_and_signal }
%struct.stk_seg = type { i32, i32, [0 x i8] }
%struct.gc_alloc = type { %struct.gc_alloc*, %struct.gc_alloc*, i32, [0 x i8] }
%struct.rust_scheduler = type { %class.rust_thread, %struct.rc_base, i32, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, i32, %class.hash_map, %class.hash_map.3, i32, %class.lock_and_signal, i32, %struct._opaque_pthread_attr_t, %struct.rust_env* }
%class.rust_thread = type { i32 (...)**, i8, %struct._opaque_pthread_t* }
%struct._opaque_pthread_t = type { i32, %struct.__darwin_pthread_handler_rec*, [596 x i8] }
%struct.__darwin_pthread_handler_rec = type { {}*, i8*, %struct.__darwin_pthread_handler_rec* }
%struct.rc_base = type { i32 }
%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_scheduler*, i8 }
%class.rust_srv = type { i32 (...)**, %struct.rust_env*, %class.memory_region }
%struct.rust_env = type { i32, i32, i8*, i8, i8, i8* }
%class.memory_region = type { i32 (...)**, %class.rust_srv*, %class.memory_region*, i32, %class.array_list.0, i8, i8, %class.lock_and_signal, i8 }
%class.array_list.0 = type { i32, %"struct.memory_region::alloc_header"**, i32 }
%"struct.memory_region::alloc_header" = type { i32, i32, i8*, [0 x i8] }
%class.lock_and_signal = type { i32 (...)**, %struct._opaque_pthread_cond_t, %struct._opaque_pthread_mutex_t, %struct._opaque_pthread_t*, i8, i8 }
%struct._opaque_pthread_cond_t = type { i32, [24 x i8] }
%struct._opaque_pthread_mutex_t = type { i32, [40 x i8] }
%class.rust_task_list = type { %class.indexed_list, %struct.rust_scheduler*, i8* }
%class.indexed_list = type { i32 (...)**, %class.array_list }
%class.array_list = type { i32, %struct.rust_task**, i32 }
%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_scheduler*, i32 }
%struct.type_desc = type { %struct.type_desc**, i32, i32, {}*, {}*, {}*, {}*, {}*, {}*, i32, {}*, %struct.UT_hash_handle, i32, [0 x %struct.type_desc*] }
%struct.UT_hash_handle = type { %struct.UT_hash_table*, i8*, i8*, %struct.UT_hash_handle*, %struct.UT_hash_handle*, i8*, i32, i32 }
%struct.UT_hash_table = type { %struct.UT_hash_bucket*, i32, i32, i32, %struct.UT_hash_handle*, i32, i32, i32, i32, i32 }
%struct.UT_hash_bucket = type { %struct.UT_hash_handle*, i32, i32 }
%struct.randctx = type { i32, [256 x i32], [256 x i32], i32, i32, i32 }
%class.rust_kernel = type { i32 (...)**, %class.memory_region, %class.rust_log, %class.rust_srv*, %class.lock_and_signal, %class.array_list.4, %struct.randctx, i32, i32, i32, %struct.rust_env* }
%class.array_list.4 = type { i32, %struct.rust_scheduler**, i32 }
%class.hash_map = type { %"struct.hash_map<rust_task *, rust_task *>::map_entry"* }
%"struct.hash_map<rust_task *, rust_task *>::map_entry" = type opaque
%class.hash_map.3 = type { %"struct.hash_map<rust_port *, rust_port *>::map_entry"* }
%"struct.hash_map<rust_port *, rust_port *>::map_entry" = type opaque
%struct._opaque_pthread_attr_t = type { i32, [36 x i8] }
%struct.rust_cond = type { i8 }
%class.timer = type { i32 (...)**, i64, i64 }
%class.context = type { %struct.registers_t, %class.context* }
%struct.registers_t = type { i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, i16, i16, i32, i32 }
%"class.rust_task::wakeup_callback" = type { i32 (...)** }
%struct.rust_vec = type { %struct.rc_base.5, i32, i32, i32, [0 x i8] }
%struct.rc_base.5 = type { i32 }
%struct.rust_ivec = type { i32, i32, %union.rust_ivec_payload }
%union.rust_ivec_payload = type { %struct.rust_ivec_heap* }
%struct.rust_ivec_heap = type { i32, [0 x i8] }
%class.rust_port = type { i32, %class.rust_kernel*, %struct.rust_task*, i32, %class.ptr_vec, %class.ptr_vec.7, %class.rust_chan*, %class.lock_and_signal }
%class.ptr_vec = type { %struct.rust_task*, i32, i32, %struct.rust_token** }
%struct.rust_token = type opaque
%class.ptr_vec.7 = type { %struct.rust_task*, i32, i32, %class.rust_chan** }
%class.rust_chan = type { i32, %class.rust_kernel*, %struct.rust_task*, %class.rust_port*, i32, %class.circular_buffer }
%class.circular_buffer = type { %class.rust_kernel*, i32, i32, i32, i32, i8* }
@.str = private unnamed_addr constant [42 x i8] c"attempt to cast values of differing sizes\00", align 1
@.str1 = private unnamed_addr constant [15 x i8] c"intrinsics.cpp\00", align 1
define linkonce_odr void @rust_intrinsic_vec_len(%struct.rust_task* nocapture %task, i32* nocapture %retptr, %struct.type_desc* nocapture %ty, %struct.rust_vec* nocapture %v) nounwind {
entry:
%fill = getelementptr inbounds %struct.rust_vec* %v, i32 0, i32 2
%tmp1 = load i32* %fill, align 4, !tbaa !0
%size = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%tmp3 = load i32* %size, align 4, !tbaa !0
%div = udiv i32 %tmp1, %tmp3
store i32 %div, i32* %retptr, align 4, !tbaa !0
ret void
}
define linkonce_odr void @rust_intrinsic_ivec_len(%struct.rust_task* nocapture %task, i32* nocapture %retptr, %struct.type_desc* nocapture %ty, %struct.rust_ivec* nocapture %v) nounwind {
entry:
%fill1 = getelementptr inbounds %struct.rust_ivec* %v, i32 0, i32 0
%tmp2 = load i32* %fill1, align 4, !tbaa !0
%tobool = icmp eq i32 %tmp2, 0
br i1 %tobool, label %if.else, label %if.end17
if.else: ; preds = %entry
%ptr = getelementptr inbounds %struct.rust_ivec* %v, i32 0, i32 2, i32 0
%tmp7 = load %struct.rust_ivec_heap** %ptr, align 4, !tbaa !3
%tobool8 = icmp eq %struct.rust_ivec_heap* %tmp7, null
br i1 %tobool8, label %if.end17, label %if.then9
if.then9: ; preds = %if.else
%fill14 = getelementptr inbounds %struct.rust_ivec_heap* %tmp7, i32 0, i32 0
%tmp15 = load i32* %fill14, align 4, !tbaa !0
br label %if.end17
if.end17: ; preds = %if.else, %entry, %if.then9
%fill.0 = phi i32 [ %tmp15, %if.then9 ], [ %tmp2, %entry ], [ 0, %if.else ]
%size = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%tmp20 = load i32* %size, align 4, !tbaa !0
%div = udiv i32 %fill.0, %tmp20
store i32 %div, i32* %retptr, align 4, !tbaa !0
ret void
}
define linkonce_odr void @rust_intrinsic_ptr_offset(%struct.rust_task* nocapture %task, i8** nocapture %retptr, %struct.type_desc* nocapture %ty, i8* %ptr, i32 %count) nounwind {
entry:
%size = getelementptr inbounds %struct.type_desc* %ty, i32 0, i32 1
%tmp1 = load i32* %size, align 4, !tbaa !0
%mul = mul i32 %tmp1, %count
%arrayidx = getelementptr inbounds i8* %ptr, i32 %mul
store i8* %arrayidx, i8** %retptr, align 4, !tbaa !3
ret void
}
define linkonce_odr void @rust_intrinsic_cast(%struct.rust_task* %task, i8* nocapture %retptr, %struct.type_desc* nocapture %t1, %struct.type_desc* nocapture %t2, i8* nocapture %src) {
entry:
%size = getelementptr inbounds %struct.type_desc* %t1, i32 0, i32 1
%tmp1 = load i32* %size, align 4, !tbaa !0
%size3 = getelementptr inbounds %struct.type_desc* %t2, i32 0, i32 1
%tmp4 = load i32* %size3, align 4, !tbaa !0
%cmp = icmp eq i32 %tmp1, %tmp4
br i1 %cmp, label %if.end, label %if.then
if.then: ; preds = %entry
tail call void @upcall_fail(%struct.rust_task* %task, i8* getelementptr inbounds ([42 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8]* @.str1, i32 0, i32 0), i32 45)
br label %return
if.end: ; preds = %entry
tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %retptr, i8* %src, i32 %tmp1, i32 1, i1 false)
br label %return
return: ; preds = %if.end, %if.then
ret void
}
declare void @upcall_fail(%struct.rust_task*, i8*, i8*, i32)
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
define linkonce_odr void @rust_intrinsic_addr_of(%struct.rust_task* nocapture %task, i8** nocapture %retptr, %struct.type_desc* nocapture %ty, i8* %valptr) nounwind {
entry:
store i8* %valptr, i8** %retptr, align 4, !tbaa !3
ret void
}
define linkonce_odr void @rust_intrinsic_recv(%struct.rust_task* %task, i8** nocapture %retptr, %struct.type_desc* nocapture %ty, %class.rust_port* %port) {
entry:
%tmp2 = load i8** %retptr, align 4, !tbaa !3
%0 = bitcast i8* %tmp2 to i32*
tail call void @port_recv(%struct.rust_task* %task, i32* %0, %class.rust_port* %port)
ret void
}
declare void @port_recv(%struct.rust_task*, i32*, %class.rust_port*)
!0 = metadata !{metadata !"long", metadata !1}
!1 = metadata !{metadata !"omnipotent char", metadata !2}
!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
!3 = metadata !{metadata !"any pointer", metadata !1}

View File

@ -1,237 +0,0 @@
; ModuleID = 'src/rt/intrinsics/intrinsics.cpp'
; target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "@CFG_TARGET_TRIPLE@"
%0 = type { i64, %struct.rust_task**, i64 }
%1 = type { %"struct.hash_map<long, rust_task *>::map_entry"* }
%class.array_list = type { i64, %"struct.memory_region::alloc_header"**, i64 }
%class.boxed_region = type { %class.memory_region*, %struct.rust_opaque_box* }
%class.circular_buffer = type { %class.rust_kernel*, i64, i64, i64, i64, i8* }
%class.context = type { %struct.registers_t, %class.context*, [8 x i8] }
%"class.debug::task_debug_info" = type { %"class.std::map" }
%class.hash_map = type { %"struct.hash_map<long, rust_port *>::map_entry"* }
%class.indexed_list = type { i32 (...)**, %0 }
%class.lock_and_signal = type { i32 (...)**, %struct._opaque_pthread_cond_t, %struct._opaque_pthread_attr_t, %struct._opaque_pthread_t* }
%class.memory_region = type { i32 (...)**, %class.rust_srv*, %class.memory_region*, i32, %class.array_list, i8, i8, %class.lock_and_signal }
%class.rust_crate_cache = type { %struct.type_desc*, %struct.rust_hashable_dict*, %struct.rust_task_thread*, i64 }
%class.rust_kernel = type { %class.memory_region, %class.rust_log, %class.rust_srv*, %class.lock_and_signal, i64, i64, %1, %class.lock_and_signal, i32, %class.lock_and_signal, i64, %"class.std::map", %"class.std::vector", %struct.rust_env* }
%class.rust_log = type { i32 (...)**, %class.rust_srv*, %struct.rust_task_thread*, i8 }
%class.rust_obstack = type { %struct.rust_obstack_chunk*, %struct.rust_task* }
%class.rust_port = type { i64, i64, %class.rust_kernel*, %struct.rust_task*, i64, %class.circular_buffer, %class.lock_and_signal }
%class.rust_port_selector = type { %class.rust_port**, i64, %class.lock_and_signal }
%class.rust_scheduler = type opaque
%class.rust_srv = type { i32 (...)**, %struct.rust_env*, %class.memory_region }
%class.rust_task_list = type { %class.indexed_list, %struct.rust_task_thread*, i8* }
%class.rust_thread = type { i32 (...)**, %struct._opaque_pthread_t*, i64 }
%"class.std::_Rb_tree" = type { %"struct.std::_Rb_tree<long, std::pair<const long, rust_scheduler *>, std::_Select1st<std::pair<const long, rust_scheduler *> >, std::less<long>, std::allocator<std::pair<const long, rust_scheduler *> > >::_Rb_tree_impl" }
%"class.std::map" = type { %"class.std::_Rb_tree" }
%"class.std::vector" = type { %"struct.std::_Vector_base" }
%struct.UT_hash_bucket = type { %struct.UT_hash_handle*, i32, i32 }
%struct.UT_hash_handle = type { %struct.UT_hash_table*, i8*, i8*, %struct.UT_hash_handle*, %struct.UT_hash_handle*, i8*, i32, i32 }
%struct.UT_hash_table = type { %struct.UT_hash_bucket*, i32, i32, i32, %struct.UT_hash_handle*, i64, i32, i32, i32, i32 }
%struct.__darwin_pthread_handler_rec = type { void (i8*)*, i8*, %struct.__darwin_pthread_handler_rec* }
%struct._opaque_pthread_attr_t = type { i64, [56 x i8] }
%struct._opaque_pthread_cond_t = type { i64, [40 x i8] }
%struct._opaque_pthread_t = type { i64, %struct.__darwin_pthread_handler_rec*, [1168 x i8] }
%struct.chan_handle = type { i64, i64 }
%"struct.hash_map<long, rust_port *>::map_entry" = type opaque
%"struct.hash_map<long, rust_task *>::map_entry" = type opaque
%"struct.memory_region::alloc_header" = type { i8 }
%struct.randctx = type { i64, [256 x i64], [256 x i64], i64, i64, i64 }
%struct.registers_t = type { [22 x i64] }
%struct.rust_box = type opaque
%struct.rust_env = type { i64, i64, i64, i8*, i8, i8, i8* }
%struct.rust_fn = type { i64*, %struct.rust_box* }
%struct.rust_hashable_dict = type { %struct.UT_hash_handle, [0 x i8*] }
%struct.rust_obstack_chunk = type { %struct.rust_obstack_chunk*, i64, i64, i64, [0 x i8] }
%struct.rust_opaque_box = type { i64, %struct.type_desc*, %struct.rust_opaque_box*, %struct.rust_opaque_box* }
%struct.rust_shape_tables = type { i8*, i8* }
%struct.rust_task = type { i64, i64, i8, %struct.chan_handle, [8 x i8], %class.context, %struct.stk_seg*, i64, %class.rust_scheduler*, %struct.rust_task_thread*, %class.rust_crate_cache*, %class.rust_kernel*, i8*, %class.rust_task_list*, %"struct.memory_region::alloc_header"*, i8*, %struct.rust_task*, i32, i64, i64*, %class.memory_region, %class.boxed_region, i8, i8, %class.lock_and_signal, %class.hash_map, %class.rust_obstack, i32, %"class.debug::task_debug_info", i64, i8, i8, %struct.stk_seg*, i64, i64, %class.rust_port_selector }
%struct.rust_task_thread = type { %class.rust_thread, i64, %class.rust_log, i32, %class.rust_srv*, i8*, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_task_list, %class.rust_crate_cache, %struct.randctx, %class.rust_kernel*, %class.rust_scheduler*, i32, i32, %class.lock_and_signal, i64, %struct._opaque_pthread_attr_t, %struct.rust_env*, %class.context, i8, %struct.stk_seg*, %struct.stk_seg*, [8 x i8] }
%struct.rust_vec = type { i64, i64, [0 x i8] }
%"struct.std::_Rb_tree<long, std::pair<const long, rust_scheduler *>, std::_Select1st<std::pair<const long, rust_scheduler *> >, std::less<long>, std::allocator<std::pair<const long, rust_scheduler *> > >::_Rb_tree_impl" = type { %"struct.memory_region::alloc_header", %"struct.std::_Rb_tree_node_base", i64 }
%"struct.std::_Rb_tree_node_base" = type { i32, %"struct.std::_Rb_tree_node_base"*, %"struct.std::_Rb_tree_node_base"*, %"struct.std::_Rb_tree_node_base"* }
%"struct.std::_Vector_base" = type { %"struct.std::_Vector_base<long, std::allocator<long> >::_Vector_impl" }
%"struct.std::_Vector_base<long, std::allocator<long> >::_Vector_impl" = type { i64*, i64*, i64* }
%struct.stk_seg = type { %struct.stk_seg*, %struct.stk_seg*, i64, i32, i64, [0 x i8] }
%struct.type_desc = type { %struct.type_desc**, i64, i64, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, i8*, void (i8*, i8*, %struct.type_desc**, i8*)*, void (i8*, i8*, %struct.type_desc**, i8*)*, i64, i8*, i8*, %struct.rust_shape_tables*, i64, i64, %struct.UT_hash_handle, i64, [0 x %struct.type_desc*] }
; vec_len intrinsic (x86_64, env-taking variant): element count of a vector.
; Loads the vector's first i64 field (its byte length / fill — inferred from
; the division below; TODO confirm against rust_vec layout) and divides by
; the element size taken from the tydesc (field 1), storing the count.
define void @rust_intrinsic_vec_len(i64* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, %struct.rust_vec** nocapture %vp) nounwind {
%1 = load %struct.rust_vec** %vp, align 8
%2 = getelementptr inbounds %struct.rust_vec* %1, i64 0, i32 0
%3 = load i64* %2, align 8
; ty->size (field 1 of the type descriptor).
%4 = getelementptr inbounds %struct.type_desc* %ty, i64 0, i32 1
%5 = load i64* %4, align 8
%6 = udiv i64 %3, %5
store i64 %6, i64* %retptr, align 8
ret void
}
; ptr_offset intrinsic: pointer arithmetic in element units.
; Computes retptr = ptr + ty->size * count, where the element size comes
; from field 1 of the type descriptor.
define void @rust_intrinsic_ptr_offset(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* %ptr, i64 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i64 0, i32 1
%2 = load i64* %1, align 8
%3 = mul i64 %2, %count
%4 = getelementptr inbounds i8* %ptr, i64 %3
store i8* %4, i8** %retptr, align 8
ret void
}
; cast intrinsic (x86_64, env-taking variant): bitwise reinterpretation.
; Copies t1->size bytes from %src into the return slot. Note: unlike the
; task-taking 32-bit variant, this version performs no size-equality check
; and %t2 is unused.
define void @rust_intrinsic_cast(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %t1, %struct.type_desc* nocapture %t2, i8* nocapture %src) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %t1, i64 0, i32 1
%2 = load i64* %1, align 8
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %retptr, i8* %src, i64 %2, i32 1, i1 false)
ret void
}
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
; addr_of intrinsic (x86_64): stores the address of the value (%valptr)
; into the return slot. %env and the tydesc %ty are unused.
define void @rust_intrinsic_addr_of(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* %valptr) nounwind {
store i8* %valptr, i8** %retptr, align 8
ret void
}
; call_with_retptr intrinsic: invokes a Rust closure, handing it the
; caller's return slot as an argument. %recvfn is a rust_fn pair
; { code pointer, boxed environment }; the code pointer is cast to the
; closure calling convention (out-ptr, env, arg) and called with a null
; out-pointer, the box as env, and %retptr as the argument.
define void @rust_intrinsic_call_with_retptr(i8** %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, %struct.rust_fn* nocapture %recvfn) {
; recvfn->fn (field 0): the closure's code pointer.
%1 = getelementptr inbounds %struct.rust_fn* %recvfn, i64 0, i32 0
%2 = load i64** %1, align 8
%3 = bitcast i64* %2 to void (i8**, i8*, i8**)*
; recvfn->env (field 1): the closure's box.
%4 = getelementptr inbounds %struct.rust_fn* %recvfn, i64 0, i32 1
%5 = load %struct.rust_box** %4, align 8
%6 = bitcast %struct.rust_box* %5 to i8*
tail call void %3(i8** null, i8* %6, i8** %retptr)
ret void
}
; get_type_desc intrinsic: returns the type descriptor pointer itself.
; Stores %ty (cast to i8*) into the return slot.
define void @rust_intrinsic_get_type_desc(i8** nocapture %retptr, i8* nocapture %env, %struct.type_desc* %ty) nounwind {
%ty.c = bitcast %struct.type_desc* %ty to i8*
store i8* %ty.c, i8** %retptr, align 8
ret void
}
; task_yield intrinsic: thin forwarder to the runtime's rust_task_yield,
; passing the task and the %killed flag pointer through unchanged.
define void @rust_intrinsic_task_yield(i8** nocapture %retptr, i8* nocapture %env, %struct.rust_task* %task, i8* %killed) {
tail call void @rust_task_yield(%struct.rust_task* %task, i8* %killed)
ret void
}
declare void @rust_task_yield(%struct.rust_task*, i8*)
; memmove intrinsic: moves %count elements of the described type from %src
; to %dst (ty->size * count bytes). Uses llvm.memmove, so overlapping
; ranges are handled correctly. The return slot is unused.
define void @rust_intrinsic_memmove(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %dst, i8* nocapture %src, i64 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i64 0, i32 1
%2 = load i64* %1, align 8
%3 = mul i64 %2, %count
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %3, i32 1, i1 false)
ret void
}
; memcpy intrinsic: copies %count elements of the described type from %src
; to %dst (ty->size * count bytes). Uses llvm.memcpy, so source and
; destination must not overlap. The return slot is unused.
define void @rust_intrinsic_memcpy(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %dst, i8* nocapture %src, i64 %count) nounwind {
%1 = getelementptr inbounds %struct.type_desc* %ty, i64 0, i32 1
%2 = load i64* %1, align 8
%3 = mul i64 %2, %count
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %3, i32 1, i1 false)
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
; leak intrinsic: deliberately a no-op — accepts %thing and does nothing
; with it (marked readnone). All parameters are ignored.
define void @rust_intrinsic_leak(i8* nocapture %retptr, i8* nocapture %env, %struct.type_desc* nocapture %ty, i8* nocapture %thing) nounwind readnone {
ret void
}
; vec_push upcall: appends one element (*%elt, described by %elt_ty) to the
; vector at *%vp, growing the allocation if needed. Structure (mangled
; labels come from the inlined C++ reserve_vec_fast / copy_elements):
;   1. Compute new fill = old fill + elt_ty->size.
;   2. If capacity < new fill, round new fill up to the next power of two
;      and realloc (capacity + 16-byte header) via upcall_shared_realloc,
;      storing the updated vec pointer and capacity.
;   3. memmove the element bytes into the data area at the old fill offset.
;   4. If the tydesc's glue function (slot 3) is non-null, invoke it on each
;      copied chunk — presumably take/copy glue for the element; TODO
;      confirm slot-3 semantics against the type_desc definition.
;   5. Bump the vector's fill by the element size.
define void @upcall_intrinsic_vec_push(%struct.rust_vec** nocapture %vp, %struct.type_desc* nocapture %elt_ty, i8* nocapture %elt) {
; <label>:0
; %3 = current fill (vec field 0), %5 = element size (tydesc field 1),
; %6 = required fill after the push, %8 = current capacity (vec field 1).
%1 = load %struct.rust_vec** %vp, align 8
%2 = getelementptr inbounds %struct.rust_vec* %1, i64 0, i32 0
%3 = load i64* %2, align 8
%4 = getelementptr inbounds %struct.type_desc* %elt_ty, i64 0, i32 1
%5 = load i64* %4, align 8
%6 = add i64 %5, %3
%7 = getelementptr inbounds %struct.rust_vec* %1, i64 0, i32 1
%8 = load i64* %7, align 8
%9 = icmp ult i64 %8, %6
br i1 %9, label %10, label %_Z16reserve_vec_fastPP8rust_vecm.exit
; <label>:10 ; preds = %0
; Grow path. The lshr/or cascade smears the high bit of (%6 - 1) downward,
; so %24 = next power of two >= %6 (new capacity) and %25 = %24 + 16
; (capacity plus the two-i64 vec header) is the realloc size in bytes.
%11 = add i64 %6, -1
%12 = lshr i64 %11, 1
%13 = or i64 %12, %11
%14 = lshr i64 %13, 2
%15 = or i64 %14, %13
%16 = lshr i64 %15, 4
%17 = or i64 %16, %15
%18 = lshr i64 %17, 8
%19 = or i64 %18, %17
%20 = lshr i64 %19, 16
%21 = or i64 %20, %19
%22 = lshr i64 %21, 32
%23 = or i64 %22, %21
%24 = add i64 %23, 1
%25 = add i64 %23, 17
%26 = bitcast %struct.rust_vec* %1 to i8*
%27 = tail call i8* @upcall_shared_realloc(i8* %26, i64 %25)
%28 = bitcast i8* %27 to %struct.rust_vec*
store %struct.rust_vec* %28, %struct.rust_vec** %vp, align 8
; Store the new capacity into vec field 1 (byte offset 8).
%29 = getelementptr inbounds i8* %27, i64 8
%30 = bitcast i8* %29 to i64*
store i64 %24, i64* %30, align 8
; Reload values invalidated by the realloc (compiler-generated PRE names).
%.pr = load i64* %4, align 8
%.pre = load %struct.rust_vec** %vp, align 8
%.phi.trans.insert = getelementptr inbounds %struct.rust_vec* %.pre, i64 0, i32 0
%.pre4 = load i64* %.phi.trans.insert, align 8
br label %_Z16reserve_vec_fastPP8rust_vecm.exit
_Z16reserve_vec_fastPP8rust_vecm.exit: ; preds = %0, %10
; Merge: %31 = fill, %32 = (possibly reallocated) vec, %33 = elt size.
%31 = phi i64 [ %3, %0 ], [ %.pre4, %10 ]
%32 = phi %struct.rust_vec* [ %1, %0 ], [ %.pre, %10 ]
%33 = phi i64 [ %5, %0 ], [ %.pr, %10 ]
%34 = getelementptr inbounds %struct.rust_vec* %32, i64 0, i32 0
; %35 = &vec->data[fill]; copy the new element's bytes into place.
%35 = getelementptr inbounds %struct.rust_vec* %32, i64 0, i32 2, i64 %31
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %35, i8* %elt, i64 %33, i32 1, i1 false)
; Load the glue function from tydesc slot 3; skip the glue loop if null.
%36 = getelementptr inbounds %struct.type_desc* %elt_ty, i64 0, i32 3
%37 = load void (i8*, i8*, %struct.type_desc**, i8*)** %36, align 8
%38 = icmp eq void (i8*, i8*, %struct.type_desc**, i8*)* %37, null
br i1 %38, label %_ZL13copy_elementsP9type_descPvS1_m.exit, label %39
; <label>:39 ; preds = %_Z16reserve_vec_fastPP8rust_vecm.exit
; %40 = elt size (reloaded), %42 = tydesc's first-param tydescs (slot 0).
%40 = load i64* %4, align 8
%41 = getelementptr inbounds %struct.type_desc* %elt_ty, i64 0, i32 0
%42 = load %struct.type_desc*** %41, align 8
%43 = icmp sgt i64 %33, 0
br i1 %43, label %.lr.ph.i.preheader, label %_ZL13copy_elementsP9type_descPvS1_m.exit
.lr.ph.i.preheader: ; preds = %39
; %scevgep2 = start of the vec data area (one header past the vec base).
%scevgep = getelementptr %struct.rust_vec* %32, i64 1, i32 0
%scevgep2 = bitcast i64* %scevgep to i8*
br label %.lr.ph.i
.lr.ph.i: ; preds = %.lr.ph.i.preheader, %.lr.ph.i
; Walk the just-copied bytes in elt-size strides, calling the glue on each.
%indvar.i = phi i64 [ %indvar.next.i, %.lr.ph.i ], [ 0, %.lr.ph.i.preheader ]
%tmp = mul i64 %40, %indvar.i
%tmp2.i = add i64 %40, %tmp
%tmp3 = add i64 %31, %tmp
%p.01.i = getelementptr i8* %scevgep2, i64 %tmp3
tail call void %37(i8* null, i8* null, %struct.type_desc** %42, i8* %p.01.i)
%44 = icmp slt i64 %tmp2.i, %33
%indvar.next.i = add i64 %indvar.i, 1
br i1 %44, label %.lr.ph.i, label %_ZL13copy_elementsP9type_descPvS1_m.exit
_ZL13copy_elementsP9type_descPvS1_m.exit: ; preds = %.lr.ph.i, %_Z16reserve_vec_fastPP8rust_vecm.exit, %39
; Commit the push: fill += elt size.
%45 = load i64* %4, align 8
%46 = load i64* %34, align 8
%47 = add i64 %46, %45
store i64 %47, i64* %34, align 8
ret void
}
; Compatibility alias: upcall_vec_push forwards directly to
; upcall_intrinsic_vec_push with all arguments unchanged.
define void @upcall_vec_push(%struct.rust_vec** nocapture %vp, %struct.type_desc* nocapture %elt_ty, i8* nocapture %elt) {
tail call void @upcall_intrinsic_vec_push(%struct.rust_vec** %vp, %struct.type_desc* %elt_ty, i8* %elt)
ret void
}
; frame_address intrinsic: stores a stack frame address into *%p.
; llvm.frameaddress(1) requests the frame one level up — i.e. the caller's
; frame rather than this function's own (per the LLVM intrinsic contract).
define void @rust_intrinsic_frame_address(i8** nocapture %p) nounwind {
%1 = tail call i8* @llvm.frameaddress(i32 1)
store i8* %1, i8** %p, align 8
ret void
}
declare i8* @llvm.frameaddress(i32) nounwind readnone
declare i8* @upcall_shared_realloc(i8*, i64)

View File

@ -17,7 +17,6 @@
shared_realloc: ValueRef,
mark: ValueRef,
vec_grow: ValueRef,
vec_push: ValueRef,
str_concat: ValueRef,
cmp_type: ValueRef,
log_type: ValueRef,
@ -41,7 +40,6 @@ fn decl(llmod: ModuleRef, prefix: str, name: str,
}
let d = bind decl(llmod, "upcall_", _, _, _);
let dv = bind decl(llmod, "upcall_", _, _, T_void());
let dvi = bind decl(llmod, "upcall_intrinsic_", _, _, T_void());
let int_t = T_int(targ_cfg);
let size_t = T_size_t(targ_cfg);
@ -66,10 +64,6 @@ fn decl(llmod: ModuleRef, prefix: str, name: str,
d("mark", [T_ptr(T_i8())], int_t),
vec_grow:
dv("vec_grow", [T_ptr(T_ptr(opaque_vec_t)), int_t]),
vec_push:
dvi("vec_push",
[T_ptr(T_ptr(opaque_vec_t)), T_ptr(tydesc_type),
T_ptr(T_i8())]),
str_concat:
d("str_concat", [T_ptr(opaque_vec_t), T_ptr(opaque_vec_t)],
T_ptr(opaque_vec_t)),

View File

@ -237,8 +237,8 @@ fn native_abi(attrs: [ast::attribute]) -> either<str, ast::native_abi> {
option::none {
either::right(ast::native_abi_cdecl)
}
option::some("rust-intrinsic") | option::some("rust-builtin") {
either::right(ast::native_abi_rust_builtin)
option::some("rust-intrinsic") {
either::right(ast::native_abi_rust_intrinsic)
}
option::some("cdecl") {
either::right(ast::native_abi_cdecl)

View File

@ -68,7 +68,6 @@
const tag_item_method: uint = 0x31u;
const tag_impl_iface: uint = 0x32u;
const tag_item_is_intrinsic: uint = 0x33u;
// discriminator value for variants
const tag_disr_val: uint = 0x34u;

View File

@ -26,7 +26,6 @@
export get_impl_iface;
export get_impl_method;
export get_item_path;
export item_is_intrinsic;
export maybe_get_item_ast, found_ast, found, found_parent, not_found;
fn get_symbol(cstore: cstore::cstore, def: ast::def_id) -> str {
@ -190,11 +189,6 @@ fn get_class_method(cstore: cstore::cstore, def: ast::def_id, mname: str)
decoder::get_class_method(cdata, def.node, mname)
}
fn item_is_intrinsic(cstore: cstore::cstore, def: ast::def_id) -> bool {
let cdata = cstore::get_crate_data(cstore, def.crate);
decoder::item_is_intrinsic(cdata, def.node)
}
// Local Variables:
// mode: rust
// fill-column: 78;

View File

@ -41,7 +41,6 @@
export maybe_find_item; // sketchy
export item_type; // sketchy
export maybe_get_item_ast;
export item_is_intrinsic;
// Used internally by astencode:
export translate_def_id;
@ -309,13 +308,6 @@ fn get_class_method(cdata: cmd, id: ast::node_id, name: str) -> ast::def_id {
}
}
fn item_is_intrinsic(cdata: cmd, id: ast::node_id) -> bool {
let mut intrinsic = false;
ebml::tagged_docs(lookup_item(id, cdata.data), tag_item_is_intrinsic,
{|_i| intrinsic = true;});
intrinsic
}
fn get_symbol(data: @[u8], id: ast::node_id) -> str {
ret item_symbol(lookup_item(id, data));
}

View File

@ -676,12 +676,8 @@ fn encode_info_for_native_item(ecx: @encode_ctxt, ebml_w: ebml::writer,
encode_def_id(ebml_w, local_def(nitem.id));
encode_family(ebml_w, purity_fn_family(fn_decl.purity));
encode_type_param_bounds(ebml_w, ecx, tps);
if abi == native_abi_rust_intrinsic {
ebml_w.start_tag(tag_item_is_intrinsic);
ebml_w.end_tag();
}
encode_type(ecx, ebml_w, node_id_to_type(ecx.ccx.tcx, nitem.id));
if abi == native_abi_rust_builtin {
if abi == native_abi_rust_intrinsic {
astencode::encode_inlined_item(ecx, ebml_w, path,
ii_native(nitem));
} else {

View File

@ -92,7 +92,7 @@ fn map_decoded_item(sess: session, map: map, path: path, ii: inlined_item) {
alt ii {
ii_item(i) { /* fallthrough */ }
ii_native(i) {
cx.map.insert(i.id, node_native_item(i, native_abi_rust_builtin,
cx.map.insert(i.id, node_native_item(i, native_abi_rust_intrinsic,
@path));
}
ii_method(impl_did, m) {

View File

@ -124,7 +124,7 @@ fn check_native_fn(tcx: ty::ctxt, decl: ast::fn_decl) {
fn check_item(tcx: ty::ctxt, it: @ast::item) {
alt it.node {
ast::item_native_mod(nmod) if attr::native_abi(it.attrs) !=
either::right(ast::native_abi_rust_builtin) {
either::right(ast::native_abi_rust_intrinsic) {
for ni in nmod.items {
alt ni.node {
ast::native_item_fn(decl, tps) {

View File

@ -1785,9 +1785,7 @@ enum callee_env {
type lval_maybe_callee = {bcx: block,
val: ValueRef,
kind: lval_kind,
env: callee_env,
// Tydescs to pass. Only used to call intrinsics
tds: option<[ValueRef]>};
env: callee_env};
fn null_env_ptr(bcx: block) -> ValueRef {
C_null(T_opaque_box_ptr(bcx.ccx()))
@ -1806,7 +1804,7 @@ fn lval_temp(bcx: block, val: ValueRef) -> lval_result {
fn lval_no_env(bcx: block, val: ValueRef, kind: lval_kind)
-> lval_maybe_callee {
ret {bcx: bcx, val: val, kind: kind, env: is_closure, tds: none};
ret {bcx: bcx, val: val, kind: kind, env: is_closure};
}
fn trans_external_path(ccx: @crate_ctxt, did: ast::def_id, t: ty::t)
@ -1893,7 +1891,7 @@ fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: [ty::t],
fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, real_substs: [ty::t],
vtables: option<typeck::vtable_res>,
ref_id: option<ast::node_id>)
-> {val: ValueRef, must_cast: bool, intrinsic: bool} {
-> {val: ValueRef, must_cast: bool} {
let _icx = ccx.insn_ctxt("monomorphic_fn");
let mut must_cast = false;
let substs = vec::map(real_substs, {|t|
@ -1911,7 +1909,7 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, real_substs: [ty::t],
}
alt ccx.monomorphized.find(hash_id) {
some(val) {
ret {val: val, must_cast: must_cast, intrinsic: false};
ret {val: val, must_cast: must_cast};
}
none {}
}
@ -1933,13 +1931,12 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, real_substs: [ty::t],
}
ast_map::node_variant(v, _, pt) { (pt, v.node.name) }
ast_map::node_method(m, _, pt) { (pt, m.ident) }
ast_map::node_native_item(i, ast::native_abi_rust_builtin, pt)
ast_map::node_native_item(i, ast::native_abi_rust_intrinsic, pt)
{ (pt, i.ident) }
ast_map::node_native_item(_, abi, _) {
// Natives don't have to be monomorphized.
ret {val: get_item_val(ccx, fn_id.node),
must_cast: true,
intrinsic: abi == ast::native_abi_rust_intrinsic};
must_cast: true};
}
ast_map::node_ctor(i, _) {
alt check ccx.tcx.items.get(i.id) {
@ -1968,8 +1965,8 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, real_substs: [ty::t],
trans_fn(ccx, pt, d, body, lldecl, no_self, psubsts, d_id, none);
}
ast_map::node_native_item(i, _, _) {
native::trans_builtin(ccx, lldecl, i, pt, option::get(psubsts),
ref_id);
native::trans_intrinsic(ccx, lldecl, i, pt, option::get(psubsts),
ref_id);
}
ast_map::node_variant(v, enum_item, _) {
let tvs = ty::enum_variants(ccx.tcx, local_def(enum_item.id));
@ -1998,7 +1995,7 @@ fn monomorphic_fn(ccx: @crate_ctxt, fn_id: ast::def_id, real_substs: [ty::t],
}
}
}
{val: lldecl, must_cast: must_cast, intrinsic: false}
{val: lldecl, must_cast: must_cast}
}
fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id)
@ -2062,33 +2059,6 @@ fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id)
}
}
fn lval_intrinsic_fn(bcx: block, val: ValueRef, tys: [ty::t],
id: ast::node_id) -> lval_maybe_callee {
let _icx = bcx.insn_ctxt("lval_intrinsic_fn");
fn add_tydesc_params(ccx: @crate_ctxt, llfty: TypeRef, n: uint)
-> TypeRef {
let out_ty = llvm::LLVMGetReturnType(llfty);
let n_args = llvm::LLVMCountParamTypes(llfty);
let args = vec::from_elem(n_args as uint, 0 as TypeRef);
unsafe { llvm::LLVMGetParamTypes(llfty, vec::unsafe::to_ptr(args)); }
T_fn(vec::slice(args, 0u, first_real_arg) +
vec::from_elem(n, T_ptr(ccx.tydesc_type)) +
vec::tailn(args, first_real_arg), out_ty)
}
let mut bcx = bcx;
let ccx = bcx.ccx();
let tds = vec::map(tys, {|t|
let mut ti = none, td = get_tydesc(bcx.ccx(), t, ti);
lazily_emit_all_tydesc_glue(ccx, ti);
td
});
let llfty = type_of_fn_from_ty(ccx, node_id_type(bcx, id));
let val = PointerCast(bcx, val, T_ptr(add_tydesc_params(
ccx, llfty, tys.len())));
{bcx: bcx, val: val, kind: owned, env: null_env, tds: some(tds)}
}
fn lval_static_fn(bcx: block, fn_id: ast::def_id, id: ast::node_id)
-> lval_maybe_callee {
let _icx = bcx.insn_ctxt("lval_static_fn");
@ -2112,14 +2082,13 @@ fn lval_static_fn_inner(bcx: block, fn_id: ast::def_id, id: ast::node_id,
} else { fn_id };
if fn_id.crate == ast::local_crate && tys.len() > 0u {
let mut {val, must_cast, intrinsic} =
let mut {val, must_cast} =
monomorphic_fn(ccx, fn_id, tys, vtables, some(id));
if intrinsic { ret lval_intrinsic_fn(bcx, val, tys, id); }
if must_cast {
val = PointerCast(bcx, val, T_ptr(type_of_fn_from_ty(
ccx, node_id_type(bcx, id))));
}
ret {bcx: bcx, val: val, kind: owned, env: null_env, tds: none};
ret {bcx: bcx, val: val, kind: owned, env: null_env};
}
let mut val = if fn_id.crate == ast::local_crate {
@ -2130,11 +2099,6 @@ fn lval_static_fn_inner(bcx: block, fn_id: ast::def_id, id: ast::node_id,
trans_external_path(ccx, fn_id, tpt.ty)
};
if tys.len() > 0u {
// This is supposed to be an external native function.
// Unfortunately, I found no easy/cheap way to assert that.
if csearch::item_is_intrinsic(ccx.sess.cstore, fn_id) {
ret lval_intrinsic_fn(bcx, val, tys, id);
}
val = PointerCast(bcx, val, T_ptr(type_of_fn_from_ty(
ccx, node_id_type(bcx, id))));
}
@ -2151,7 +2115,7 @@ fn lval_static_fn_inner(bcx: block, fn_id: ast::def_id, id: ast::node_id,
}
}
ret {bcx: bcx, val: val, kind: owned, env: null_env, tds: none};
ret {bcx: bcx, val: val, kind: owned, env: null_env};
}
fn lookup_discriminant(ccx: @crate_ctxt, vid: ast::def_id) -> ValueRef {
@ -2604,7 +2568,7 @@ enum call_args {
// - new_fn_ctxt
// - trans_args
fn trans_args(cx: block, llenv: ValueRef, args: call_args, fn_ty: ty::t,
dest: dest, always_valid_retptr: bool)
dest: dest)
-> {bcx: block, args: [ValueRef], retslot: ValueRef} {
let _icx = cx.insn_ctxt("trans_args");
let mut temp_cleanups = [];
@ -2618,7 +2582,7 @@ fn trans_args(cx: block, llenv: ValueRef, args: call_args, fn_ty: ty::t,
// Arg 0: Output pointer.
let llretslot = alt dest {
ignore {
if ty::type_is_nil(retty) && !always_valid_retptr {
if ty::type_is_nil(retty) {
llvm::LLVMGetUndef(T_ptr(T_nil()))
} else { alloc_ty(bcx, retty) }
}
@ -2704,15 +2668,10 @@ fn trans_call_inner(in_cx: block, fn_expr_ty: ty::t, ret_ty: ty::t,
};
let args_res = {
trans_args(bcx, llenv, args, fn_expr_ty, dest,
option::is_some(f_res.tds))
trans_args(bcx, llenv, args, fn_expr_ty, dest)
};
bcx = args_res.bcx;
let mut llargs = args_res.args;
option::may(f_res.tds) {|vals|
llargs = vec::slice(llargs, 0u, first_real_arg) + vals +
vec::tailn(llargs, first_real_arg);
}
let llretslot = args_res.retslot;
@ -4521,7 +4480,7 @@ fn get_item_val(ccx: @crate_ctxt, id: ast::node_id) -> ValueRef {
}
ast_map::node_native_item(ni, _, pth) {
exprt = true;
native::decl_native_fn(ccx, ni, *pth + [path_name(ni.ident)])
register_fn(ccx, ni.span, *pth + [path_name(ni.ident)], ni.id)
}
ast_map::node_ctor(i, _) {
alt check i.node {

View File

@ -416,7 +416,6 @@ fn trans_bind_1(cx: block, outgoing_fty: ty::t,
args: [option<@ast::expr>], pair_ty: ty::t,
dest: dest) -> block {
let _icx = cx.insn_ctxt("closure::trans_bind1");
assert option::is_none(f_res.tds);
let ccx = cx.ccx();
let mut bound: [@ast::expr] = [];
for argopt: option<@ast::expr> in args {

View File

@ -79,7 +79,7 @@ fn trans_vtable_callee(bcx: block, env: callee_env, vtable: ValueRef,
let vtable = PointerCast(bcx, vtable,
T_ptr(T_array(T_ptr(llfty), n_method + 1u)));
let mptr = Load(bcx, GEPi(bcx, vtable, [0, n_method as int]));
{bcx: bcx, val: mptr, kind: owned, env: env, tds: none}
{bcx: bcx, val: mptr, kind: owned, env: env}
}
fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id,

View File

@ -16,7 +16,7 @@
import util::ppaux::ty_to_str;
export link_name, trans_native_mod, register_crust_fn, trans_crust_fn,
decl_native_fn, trans_builtin;
trans_intrinsic;
enum x86_64_reg_class {
no_class,
@ -730,11 +730,7 @@ fn build_ret(bcx: block, _tys: @c_stack_tys,
}
let mut cc = alt abi {
ast::native_abi_rust_intrinsic {
for item in native_mod.items { get_item_val(ccx, item.id); }
ret;
}
ast::native_abi_rust_builtin { ret; }
ast::native_abi_rust_intrinsic { ret; }
ast::native_abi_cdecl { lib::llvm::CCallConv }
ast::native_abi_stdcall { lib::llvm::X86StdcallCallConv }
};
@ -756,9 +752,9 @@ fn build_ret(bcx: block, _tys: @c_stack_tys,
}
}
fn trans_builtin(ccx: @crate_ctxt, decl: ValueRef, item: @ast::native_item,
path: ast_map::path, substs: param_substs,
ref_id: option<ast::node_id>) {
fn trans_intrinsic(ccx: @crate_ctxt, decl: ValueRef, item: @ast::native_item,
path: ast_map::path, substs: param_substs,
ref_id: option<ast::node_id>) {
let fcx = new_fn_ctxt_w_id(ccx, path, decl, item.id, none,
some(substs), some(item.span));
let bcx = top_scope_block(fcx, none), lltop = bcx.llbb;
@ -1006,31 +1002,3 @@ fn abi_of_native_fn(ccx: @crate_ctxt, i: @ast::native_item)
}
}
}
fn decl_native_fn(ccx: @crate_ctxt, i: @ast::native_item,
pth: ast_map::path) -> ValueRef {
let _icx = ccx.insn_ctxt("native::decl_native_fn");
alt i.node {
ast::native_item_fn(_, _) {
let node_type = ty::node_id_to_type(ccx.tcx, i.id);
alt abi_of_native_fn(ccx, i) {
ast::native_abi_rust_intrinsic {
// For intrinsics: link the function directly to the intrinsic
// function itself.
let fn_type = type_of_fn_from_ty(ccx, node_type);
let ri_name = "rust_intrinsic_" + native::link_name(i);
ccx.item_symbols.insert(i.id, ri_name);
get_extern_fn(ccx.externs, ccx.llmod, ri_name,
lib::llvm::CCallConv, fn_type)
}
ast::native_abi_cdecl | ast::native_abi_stdcall |
ast::native_abi_rust_builtin {
// For true external functions: create a rust wrapper
// and link to that. The rust wrapper will handle
// switching to the C stack.
register_fn(ccx, i.span, pth, i.id)
}
}
}
}
}

View File

@ -52,11 +52,7 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint)
_ {}
}
// FIXME handle external native functions in a more efficient way
if fn_id_loc.crate != local_crate {
if csearch::item_is_intrinsic(ccx.sess.cstore, fn_id) {
uint::range(0u, n_tps) {|n| cx.uses[n] |= use_tydesc;}
}
let uses = vec::from_mut(cx.uses);
ccx.type_use_cache.insert(fn_id, uses);
ret uses;
@ -73,8 +69,6 @@ fn type_uses_for(ccx: @crate_ctxt, fn_id: def_id, n_tps: uint)
}
ast_map::node_native_item(i@@{node: native_item_fn(_, _), _}, abi, _) {
if abi == native_abi_rust_intrinsic {
uint::range(0u, n_tps) {|n| cx.uses[n] |= use_tydesc;}
} else if abi == native_abi_rust_builtin {
let flags = alt check i.ident {
"size_of" | "align_of" | "init" |
"reinterpret_cast" { use_repr }

View File

@ -948,8 +948,8 @@ fn convert(tcx: ty::ctxt, it: @ast::item) {
ast::item_mod(_) {}
ast::item_native_mod(m) {
if front::attr::native_abi(it.attrs) ==
either::right(ast::native_abi_rust_builtin) {
for item in m.items { check_builtin_type(tcx, item); }
either::right(ast::native_abi_rust_intrinsic) {
for item in m.items { check_intrinsic_type(tcx, item); }
}
}
ast::item_enum(variants, ty_params) {
@ -1414,7 +1414,7 @@ fn resolve_type_vars_in_block(fcx: @fn_ctxt, blk: ast::blk) -> bool {
}
}
fn check_builtin_type(tcx: ty::ctxt, it: @ast::native_item) {
fn check_intrinsic_type(tcx: ty::ctxt, it: @ast::native_item) {
fn param(tcx: ty::ctxt, n: uint) -> ty::t {
ty::mk_param(tcx, n, local_def(0))
}
@ -1432,7 +1432,7 @@ fn arg(m: ast::rmode, ty: ty::t) -> ty::arg {
"addr_of" { (1u, [arg(ast::by_ref, param(tcx, 0u))],
ty::mk_imm_ptr(tcx, param(tcx, 0u))) }
other {
tcx.sess.span_err(it.span, "unrecognized builtin function: `" +
tcx.sess.span_err(it.span, "unrecognized intrinsic function: `" +
other + "`");
ret;
}
@ -1444,11 +1444,11 @@ fn arg(m: ast::rmode, ty: ty::t) -> ty::arg {
let i_ty = ty_of_native_item(tcx, m_collect, it);
let i_n_tps = (*i_ty.bounds).len();
if i_n_tps != n_tps {
tcx.sess.span_err(it.span, #fmt("builtin function has wrong number \
tcx.sess.span_err(it.span, #fmt("intrinsic has wrong number \
of type parameters. found %u, \
expected %u", i_n_tps, n_tps));
} else if !ty::same_type(tcx, i_ty.ty, fty) {
tcx.sess.span_err(it.span, #fmt("builtin function has wrong type. \
tcx.sess.span_err(it.span, #fmt("intrinsic has wrong type. \
expected %s", ty_to_str(tcx, fty)));
}
}

View File

@ -557,7 +557,6 @@ enum ret_style {
#[auto_serialize]
enum native_abi {
native_abi_rust_intrinsic,
native_abi_rust_builtin,
native_abi_cdecl,
native_abi_stdcall,
}