rust/src/rt/rust_uv.cpp


#ifdef __WIN32__
// For alloca
#include <malloc.h>
#endif
#include "rust_globals.h"
#include "rust_task.h"
#include "uv.h"
// crust fn pointers
typedef void (*crust_async_op_cb)(uv_loop_t* loop, void* data,
                                  uv_async_t* op_handle);
typedef void (*crust_simple_cb)(uint8_t* id_buf, void* loop_data);
typedef void (*crust_close_cb)(uint8_t* id_buf, void* handle,
                               void* data);
// data types
#define RUST_UV_HANDLE_LEN 16
struct handle_data {
  uint8_t id_buf[RUST_UV_HANDLE_LEN];
  crust_simple_cb cb;
  crust_close_cb close_cb;
};
// helpers
static void*
current_kernel_malloc(size_t size, const char* tag) {
void* ptr = rust_get_current_task()->kernel->malloc(size, tag);
return ptr;
}
static void
current_kernel_free(void* ptr) {
rust_get_current_task()->kernel->free(ptr);
}
static handle_data*
new_handle_data_from(uint8_t* buf, crust_simple_cb cb) {
  handle_data* data = (handle_data*)current_kernel_malloc(
    sizeof(handle_data),
    "handle_data");
  memcpy(data->id_buf, buf, RUST_UV_HANDLE_LEN);
  data->cb = cb;
  return data;
}
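
// Illustrative sketch (not compiled) of how a handle_data record flows
// through this shim, assuming a hypothetical crust callback `example_cb`
// exported from the Rust side: the record is heap-allocated and hung off
// the libuv handle's ->data slot, so the native_* callbacks below can
// recover the Rust-side id and dispatch.
#if 0
void example_cb(uint8_t* id_buf, void* loop_data); // hypothetical crust fn
uint8_t id[RUST_UV_HANDLE_LEN] = { 0 };            // opaque id from Rust
handle_data* data = new_handle_data_from(id, example_cb);
some_handle->data = data; // libuv hands this back in every callback
#endif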
// libuv callback impls
static void
native_crust_async_op_cb(uv_async_t* handle, int status) {
  crust_async_op_cb cb = (crust_async_op_cb)handle->data;
  void* loop_data = handle->loop->data;
  cb(handle->loop, loop_data, handle);
}
static void
native_async_cb(uv_async_t* handle, int status) {
  handle_data* handle_d = (handle_data*)handle->data;
  void* loop_data = handle->loop->data;
  handle_d->cb(handle_d->id_buf, loop_data);
}
static void
native_timer_cb(uv_timer_t* handle, int status) {
  handle_data* handle_d = (handle_data*)handle->data;
  void* loop_data = handle->loop->data;
  handle_d->cb(handle_d->id_buf, loop_data);
}
static void
native_close_cb(uv_handle_t* handle) {
  handle_data* data = (handle_data*)handle->data;
  data->close_cb(data->id_buf, handle, handle->loop->data);
}
static void
native_close_op_cb(uv_handle_t* op_handle) {
  current_kernel_free(op_handle);
  // uv_run() should return after this handle is freed
}
// native fns bound in rust
extern "C" void
rust_uv_free(void* ptr) {
current_kernel_free(ptr);
}
extern "C" void*
rust_uv_loop_new() {
return (void*)uv_loop_new();
}
extern "C" void
rust_uv_loop_delete(uv_loop_t* loop) {
// FIXME: This is a workaround for #1815. libev uses realloc(0) to
// free the loop, which valgrind doesn't like. We have suppressions
// to make valgrind ignore them.
//
// Valgrind also has a sanity check when collecting allocation backtraces
// that the stack pointer must be at least 512 bytes into the stack (at
// least 512 bytes of frames must have come before). When this is not
// the case it doesn't collect the backtrace.
//
// Unfortunately, with our spaghetti stacks that valgrind check triggers
// sometimes and we don't get the backtrace for the realloc(0), it
// fails to be suppressed, and it gets reported as 0 bytes lost
// from a malloc with no backtrace.
//
// This pads our stack with some extra space before deleting the loop
alloca(512);
uv_loop_delete(loop);
}
extern "C" void
rust_uv_loop_set_data(uv_loop_t* loop, void* data) {
loop->data = data;
}
extern "C" void*
rust_uv_bind_op_cb(uv_loop_t* loop, crust_async_op_cb cb) {
  uv_async_t* async = (uv_async_t*)current_kernel_malloc(
    sizeof(uv_async_t),
    "uv_async_t");
  uv_async_init(loop, async, native_crust_async_op_cb);
  async->data = (void*)cb;
  // decrementing the ref count here would keep this async handle from
  // counting towards keeping the loop alive; it is disabled for now
  //uv_unref(loop);
  return async;
}
extern "C" void
rust_uv_stop_op_cb(uv_handle_t* op_handle) {
uv_close(op_handle, native_close_op_cb);
}
extern "C" void
rust_uv_run(uv_loop_t* loop) {
  uv_run(loop);
}
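
// A minimal usage sketch (not compiled) for the async-op path above,
// assuming a hypothetical crust callback `op_cb` on the Rust side: bind an
// async handle to the loop, run the loop, and later tear the handle down,
// which lets uv_run() return once nothing else keeps the loop alive.
#if 0
void op_cb(uv_loop_t* loop, void* data, uv_async_t* op_handle); // hypothetical
uv_loop_t* loop = (uv_loop_t*)rust_uv_loop_new();
uv_async_t* op = (uv_async_t*)rust_uv_bind_op_cb(loop, op_cb);
rust_uv_async_send(op);               // wakes the loop; fires op_cb
rust_uv_run(loop);                    // returns once all handles are closed
// ...from inside op_cb, eventually:
rust_uv_stop_op_cb((uv_handle_t*)op); // close + free via native_close_op_cb
#endif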
extern "C" void
rust_uv_close(uv_handle_t* handle, crust_close_cb cb) {
  handle_data* data = (handle_data*)handle->data;
  data->close_cb = cb;
  uv_close(handle, native_close_cb);
}
extern "C" void
rust_uv_close_async(uv_async_t* handle) {
current_kernel_free(handle->data);
current_kernel_free(handle);
}
extern "C" void
rust_uv_close_timer(uv_timer_t* handle) {
  current_kernel_free(handle->data);
  current_kernel_free(handle);
}
extern "C" void
rust_uv_async_send(uv_async_t* handle) {
uv_async_send(handle);
}
extern "C" void*
rust_uv_async_init(uv_loop_t* loop, crust_simple_cb cb,
                   uint8_t* buf) {
  uv_async_t* async = (uv_async_t*)current_kernel_malloc(
    sizeof(uv_async_t),
    "uv_async_t");
  uv_async_init(loop, async, native_async_cb);
  handle_data* data = new_handle_data_from(buf, cb);
  async->data = data;
  return async;
}
extern "C" void*
rust_uv_timer_init(uv_loop_t* loop, crust_simple_cb cb,
                   uint8_t* buf) {
  uv_timer_t* new_timer = (uv_timer_t*)current_kernel_malloc(
    sizeof(uv_timer_t),
    "uv_timer_t");
  uv_timer_init(loop, new_timer);
  handle_data* data = new_handle_data_from(buf, cb);
  new_timer->data = data;
  return new_timer;
}
extern "C" void
rust_uv_timer_start(uv_timer_t* the_timer, uint32_t timeout,
                    uint32_t repeat) {
  uv_timer_start(the_timer, native_timer_cb, timeout, repeat);
}
extern "C" void
rust_uv_timer_stop(uv_timer_t* the_timer) {
  uv_timer_stop(the_timer);
}
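
// A minimal timer lifecycle sketch (not compiled), assuming a hypothetical
// crust callback `timer_cb` and close callback `close_cb` owned by the Rust
// side: init allocates the uv_timer_t and its handle_data, start arms it,
// and close + close_timer release both allocations.
#if 0
void timer_cb(uint8_t* id_buf, void* loop_data);          // hypothetical
void close_cb(uint8_t* id_buf, void* handle, void* data); // hypothetical
uint8_t id[RUST_UV_HANDLE_LEN] = { 0 };
uv_timer_t* t = (uv_timer_t*)rust_uv_timer_init(loop, timer_cb, id);
rust_uv_timer_start(t, 1000, 0); // fire once after ~1s of loop time
rust_uv_timer_stop(t);
rust_uv_close((uv_handle_t*)t, close_cb); // then free via rust_uv_close_timer
#endif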
extern "C" int
rust_uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
return uv_tcp_init(loop, handle);
}
extern "C" size_t
rust_uv_helper_uv_tcp_t_size() {
return sizeof(uv_tcp_t);
}
extern "C" size_t
rust_uv_helper_uv_connect_t_size() {
return sizeof(uv_connect_t);
}
extern "C" size_t
rust_uv_helper_uv_buf_t_size() {
return sizeof(uv_buf_t);
}
extern "C" size_t
rust_uv_helper_uv_write_t_size() {
return sizeof(uv_write_t);
}
extern "C" size_t
rust_uv_helper_uv_err_t_size() {
return sizeof(uv_err_t);
}
extern "C" size_t
rust_uv_helper_sockaddr_in_size() {
return sizeof(sockaddr_in);
}
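
// These size helpers exist so the Rust side can allocate correctly sized,
// opaque buffers for libuv structs it never inspects. A rough C++ equivalent
// of what the Rust caller does (not compiled; for reference only):
#if 0
void* tcp_mem = current_kernel_malloc(rust_uv_helper_uv_tcp_t_size(),
                                      "uv_tcp_t");
rust_uv_tcp_init(loop, (uv_tcp_t*)tcp_mem);
#endif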
extern "C" uv_stream_t*
rust_uv_get_stream_handle_for_connect(uv_connect_t* connect) {
return connect->handle;
}
extern "C" uv_buf_t
rust_uv_buf_init(char* base, size_t len) {
return uv_buf_init(base, len);
}
extern "C" uv_loop_t*
rust_uv_get_loop_for_uv_handle(uv_handle_t* handle) {
return handle->loop;
}
extern "C" void*
rust_uv_get_data_for_uv_handle(uv_handle_t* handle) {
return handle->data;
}
extern "C" void
rust_uv_set_data_for_uv_handle(uv_handle_t* handle,
void* data) {
handle->data = data;
}
extern "C" void*
rust_uv_get_data_for_req(uv_req_t* req) {
return req->data;
}
extern "C" void
rust_uv_set_data_for_req(uv_req_t* req, void* data) {
req->data = data;
}
extern "C" uv_err_t
rust_uv_last_error(uv_loop_t* loop) {
return uv_last_error(loop);
}
extern "C" int
rust_uv_tcp_connect(uv_connect_t* connect_ptr,
                    uv_tcp_t* tcp_ptr,
                    void* addr_ptr,
                    uv_connect_cb cb) {
  // the address arrives as a void* because by-value structs cannot yet
  // cross the Rust<->C boundary (see gh-1402); copy it out and hand it
  // to libuv by value, as its API expects
  sockaddr_in addr = *((sockaddr_in*)addr_ptr);
  return uv_tcp_connect(connect_ptr, tcp_ptr, addr, cb);
}
extern "C" void*
rust_uv_ip4_addr(const char* ip, int port) {
  // heap-allocate the sockaddr_in and return it as an opaque pointer,
  // again to avoid passing a struct by value across the boundary (gh-1402)
  sockaddr_in* addr_ptr = (sockaddr_in*)current_kernel_malloc(
    sizeof(sockaddr_in),
    "sockaddr_in");
  *addr_ptr = uv_ip4_addr(ip, port);
  return (void*)addr_ptr;
}
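
// End-to-end connect sketch (not compiled), assuming a hypothetical
// uv_connect_cb `connect_cb`: build the address as an opaque pointer (see
// gh-1402), size-allocate the handle and request, connect, then free the
// address once libuv has copied it.
#if 0
void connect_cb(uv_connect_t* req, int status); // hypothetical callback
uv_loop_t* loop = (uv_loop_t*)rust_uv_loop_new();
uv_tcp_t* tcp = (uv_tcp_t*)current_kernel_malloc(
  rust_uv_helper_uv_tcp_t_size(), "uv_tcp_t");
uv_connect_t* req = (uv_connect_t*)current_kernel_malloc(
  rust_uv_helper_uv_connect_t_size(), "uv_connect_t");
rust_uv_tcp_init(loop, tcp);
void* addr = rust_uv_ip4_addr("127.0.0.1", 80);
rust_uv_tcp_connect(req, tcp, addr, connect_cb);
rust_uv_free(addr); // uv_tcp_connect copied the sockaddr by value
rust_uv_run(loop);
#endif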