#include "rust_cc.h"
#include "rust_gc.h"
#include "rust_internal.h"
#include "rust_scheduler.h"
#include "rust_unwind.h"
#include "rust_upcall.h"
#include <stdint.h>

// Upcalls.

#ifdef __i386__
void
check_stack(rust_task *task) {
    void *esp;
    asm volatile("movl %%esp,%0" : "=r" (esp));
    if (esp < task->stk->data)
        task->kernel->fatal("Out of stack space, sorry");
}
#else
#warning "Stack checks are not supported on this architecture"
void
check_stack(rust_task *task) {
    // TODO
}
#endif
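
// check_stack() is a best-effort guard: upcalls that may be entered with
// little stack headroom (e.g. upcall_get_type_desc below) call it on entry.
// On non-i386 targets it is currently a no-op.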

// Copy elements from one vector to another,
// dealing with reference counts
static inline void
copy_elements(rust_task *task, type_desc *elem_t,
              void *pdst, void *psrc, size_t n)
{
    char *dst = (char *)pdst, *src = (char *)psrc;
    memmove(dst, src, n);

    // increment the refcount of each element of the vector
    if (elem_t->take_glue) {
        glue_fn *take_glue = elem_t->take_glue;
        size_t elem_size = elem_t->size;
        const type_desc **tydescs = elem_t->first_param;
        for (char *p = dst; p < dst+n; p += elem_size) {
            take_glue(NULL, task, NULL, tydescs, p);
        }
    }
}
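
// Used by upcall_vec_push below; the caller must have reserved room for n
// bytes at pdst (e.g. via reserve_vec) before copying.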

extern "C" CDECL void
upcall_fail(rust_task *task,
            char const *expr,
            char const *file,
            size_t line) {
    LOG_UPCALL_ENTRY(task);
    LOG_ERR(task, upcall, "upcall fail '%s', %s:%" PRIdPTR, expr, file, line);
    task->fail();
}

extern "C" CDECL uintptr_t
upcall_malloc(rust_task *unused_task, size_t nbytes, type_desc *td) {
    rust_task *task = rust_scheduler::get_task();
    LOG_UPCALL_ENTRY(task);

    LOG(task, mem,
        "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
        nbytes, td);

    gc::maybe_gc(task);
    cc::maybe_cc(task);

    // TODO: Maybe use dladdr here to find a more useful name for the
    // type_desc.

    void *p = task->malloc(nbytes, "tdesc", td);
    memset(p, '\0', nbytes);

    task->local_allocs[p] = td;
    debug::maybe_track_origin(task, p);

    LOG(task, mem,
        "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ") = 0x%" PRIxPTR,
        nbytes, td, (uintptr_t)p);
    return (uintptr_t) p;
}
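
// Task-local allocations made here are zeroed and registered in
// task->local_allocs; upcall_free below erases the entry and releases the
// memory. The maybe_gc/maybe_cc calls give the garbage collector and cycle
// collector a chance to run before allocating.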

/**
 * Called whenever an object's ref count drops to zero.
 */
extern "C" CDECL void
upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) {
    LOG_UPCALL_ENTRY(task);

    rust_scheduler *sched = task->sched;
    DLOG(sched, mem,
         "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")",
         (uintptr_t)ptr, is_gc);

    task->local_allocs.erase(ptr);
    debug::maybe_untrack_origin(task, ptr);

    task->free(ptr, (bool) is_gc);
}

extern "C" CDECL uintptr_t
upcall_shared_malloc(rust_task *task, size_t nbytes, type_desc *td) {
    LOG_UPCALL_ENTRY(task);

    LOG(task, mem,
        "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR ")",
        nbytes, td);
    void *p = task->kernel->malloc(nbytes, "shared malloc");
    memset(p, '\0', nbytes);
    LOG(task, mem,
        "upcall shared_malloc(%" PRIdPTR ", 0x%" PRIxPTR
        ") = 0x%" PRIxPTR,
        nbytes, td, (uintptr_t)p);
    return (uintptr_t) p;
}

/**
 * Called whenever an object's ref count drops to zero.
 */
extern "C" CDECL void
upcall_shared_free(rust_task *task, void* ptr) {
    LOG_UPCALL_ENTRY(task);

    rust_scheduler *sched = task->sched;
    DLOG(sched, mem,
         "upcall shared_free(0x%" PRIxPTR ")",
         (uintptr_t)ptr);
    task->kernel->free(ptr);
}
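
// Shared allocations come from the kernel heap (task->kernel->malloc/free)
// rather than the task-local heap, so they are not tracked in
// task->local_allocs; presumably they are intended for data shared across
// tasks.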

extern "C" CDECL type_desc *
upcall_get_type_desc(rust_task *task,
                     void *curr_crate, // ignored, legacy compat.
                     size_t size,
                     size_t align,
                     size_t n_descs,
                     type_desc const **descs,
                     uintptr_t n_obj_params) {
    check_stack(task);
    LOG_UPCALL_ENTRY(task);

    LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR
        ", align=%" PRIdPTR ", %" PRIdPTR " descs", size, align,
        n_descs);
    rust_crate_cache *cache = task->get_crate_cache();
    type_desc *td = cache->get_type_desc(size, align, n_descs, descs,
                                         n_obj_params);
    LOG(task, cache, "returning tydesc 0x%" PRIxPTR, td);
    return td;
}

extern "C" CDECL void
upcall_vec_grow(rust_task* task, rust_vec** vp, size_t new_sz) {
    LOG_UPCALL_ENTRY(task);
    reserve_vec(task, vp, new_sz);
    (*vp)->fill = new_sz;
}

extern "C" CDECL void
upcall_vec_push(rust_task* task, rust_vec** vp, type_desc* elt_ty,
                void* elt) {
    LOG_UPCALL_ENTRY(task);
    size_t new_sz = (*vp)->fill + elt_ty->size;
    reserve_vec(task, vp, new_sz);
    rust_vec* v = *vp;
    copy_elements(task, elt_ty, &v->data[0] + v->fill, elt, elt_ty->size);
    v->fill += elt_ty->size;
}
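
// A minimal sketch of how this is intended to be used (the names `vec`,
// `elem` and `elem_tydesc` are illustrative, not part of this file):
//
//     // append one element of type elem_tydesc to the vector
//     upcall_vec_push(task, &vec, elem_tydesc, &elem);
//
// reserve_vec grows the buffer if needed, and copy_elements takes a
// reference on the newly copied element via its take glue.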

/**
 * Returns a token that can be used to deallocate all of the allocated
 * space in the dynamic stack.
 */
extern "C" CDECL void *
upcall_dynastack_mark(rust_task *task) {
    return task->dynastack.mark();
}

/**
 * Allocates space in the dynamic stack and returns it.
 *
 * FIXME: Deprecated since dynamic stacks need to be self-describing for GC.
 */
extern "C" CDECL void *
upcall_dynastack_alloc(rust_task *task, size_t sz) {
    return sz ? task->dynastack.alloc(sz, NULL) : NULL;
}

/**
 * Allocates space associated with a type descriptor in the dynamic stack and
 * returns it.
 */
extern "C" CDECL void *
upcall_dynastack_alloc_2(rust_task *task, size_t sz, type_desc *ty) {
    return sz ? task->dynastack.alloc(sz, ty) : NULL;
}

/** Frees space in the dynamic stack. */
extern "C" CDECL void
upcall_dynastack_free(rust_task *task, void *ptr) {
    return task->dynastack.free(ptr);
}
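
// A minimal sketch of the mark/alloc/free protocol described above (the
// pairing of the mark token with free is an assumption based on the doc
// comments; generated code may sequence these differently):
//
//     void *mark = upcall_dynastack_mark(task);
//     void *buf  = upcall_dynastack_alloc_2(task, sz, ty);
//     // ... use buf ...
//     upcall_dynastack_free(task, mark);  // release everything since mark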

/**
 * Allocates |nbytes| bytes in the C stack and returns a pointer to the start
 * of the allocated space.
 */
extern "C" CDECL void *
upcall_alloc_c_stack(size_t nbytes) {
    rust_scheduler *sched = rust_scheduler::get_task()->sched;
    return sched->c_context.alloc_stack(nbytes);
}
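
// The allocation comes from the scheduler's C context stack rather than the
// Rust task stack; presumably this is used when setting up calls into native
// (C) code.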

extern "C" _Unwind_Reason_Code
__gxx_personality_v0(int version,
                     _Unwind_Action actions,
                     uint64_t exception_class,
                     _Unwind_Exception *ue_header,
                     _Unwind_Context *context);

extern "C" _Unwind_Reason_Code
upcall_rust_personality(int version,
                        _Unwind_Action actions,
                        uint64_t exception_class,
                        _Unwind_Exception *ue_header,
                        _Unwind_Context *context) {
    return __gxx_personality_v0(version,
                                actions,
                                exception_class,
                                ue_header,
                                context);
}
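
// upcall_rust_personality simply forwards to the C++ personality routine, so
// Rust frames are unwound by the same machinery that handles C++ exceptions.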

//
// Local Variables:
// mode: C++
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//