//! High-level interface to libuv's TCP functionality

use ip = net_ip;
use uv::iotask;
use uv::iotask::IoTask;
use future_spawn = future::spawn;
// FIXME #1935
// should be able to, but can't atm, replace w/ result::{result, extensions};
use result::*;
use libc::size_t;
use io::{Reader, ReaderUtil, Writer};
use comm = core::comm;

// tcp interfaces
export TcpSocket;
// buffered socket
export TcpSocketBuf, socket_buf;
// errors
export TcpErrData, TcpConnectErrData;
// operations on a tcp_socket
export write, write_future, read_start, read_stop;
// tcp server stuff
export listen, accept;
// tcp client stuff
export connect;

#[nolink]
extern mod rustrt {
    #[legacy_exports];
    fn rust_uv_current_kernel_malloc(size: libc::c_uint) -> *libc::c_void;
    fn rust_uv_current_kernel_free(mem: *libc::c_void);
    fn rust_uv_helper_uv_tcp_t_size() -> libc::c_uint;
}

/**
 * Encapsulates an open TCP/IP connection through libuv
 *
 * `tcp_socket` is non-copyable/sendable and automagically handles closing the
 * underlying libuv data structures when it goes out of scope. This is the
 * data structure that is used for read/write operations over a TCP stream.
 */
struct TcpSocket {
    socket_data: @TcpSocketData,
    drop {
        unsafe {
            tear_down_socket_data(self.socket_data)
        }
    }
}

fn TcpSocket(socket_data: @TcpSocketData) -> TcpSocket {
    TcpSocket {
        socket_data: socket_data
    }
}

/**
 * A buffered wrapper for `net::tcp::tcp_socket`
 *
 * It is created with a call to `net::tcp::socket_buf()` and has impls that
 * satisfy both the `io::reader` and `io::writer` traits.
 */
struct TcpSocketBuf {
    data: @TcpBufferedSocketData,
}

fn TcpSocketBuf(data: @TcpBufferedSocketData) -> TcpSocketBuf {
    TcpSocketBuf {
        data: data
    }
}

/// Contains raw, string-based, error information returned from libuv
type TcpErrData = {
    err_name: ~str,
    err_msg: ~str
};

/// Details returned as part of a `result::err` result from `tcp::listen`
enum TcpListenErrData {
    /**
     * Some unplanned-for error. The first and second fields correspond
     * to libuv's `err_name` and `err_msg` fields, respectively.
     */
    GenericListenErr(~str, ~str),
    /**
     * Failed to bind to the requested IP/Port, because it is already in use.
     *
     * # Possible Causes
     *
     * * Attempting to bind to a port already bound to another listener
     */
    AddressInUse,
    /**
     * Request to bind to an IP/Port was denied by the system.
     *
     * # Possible Causes
     *
     * * Attempting to bind to an IP/Port as a non-Administrator
     *   on Windows Vista+
     * * Attempting to bind, as a non-privileged
     *   user, to 'privileged' ports (< 1024) on *nix
     */
    AccessDenied
}

/// Details returned as part of a `result::err` result from `tcp::connect`
enum TcpConnectErrData {
    /**
     * Some unplanned-for error. The first and second fields correspond
     * to libuv's `err_name` and `err_msg` fields, respectively.
     */
    GenericConnectErr(~str, ~str),
    /// Connection refused by the remote host (e.g. an invalid IP or port)
    ConnectionRefused
}

/**
 * Initiate a client connection over TCP/IP
 *
 * # Arguments
 *
 * * `input_ip` - The IP address (versions 4 or 6) of the remote host
 * * `port` - the unsigned integer of the desired remote host port
 * * `iotask` - a `uv::iotask` that the tcp request will run on
 *
 * # Returns
 *
 * A `result` that, if the operation succeeds, contains a
 * `net::tcp::tcp_socket` that can be used to send and receive data to/from
 * the remote host. In the event of failure, a
 * `net::tcp::tcp_connect_err_data` instance will be returned
 */
fn connect(+input_ip: ip::IpAddr, port: uint,
           iotask: IoTask)
    -> result::Result<TcpSocket, TcpConnectErrData> unsafe {
    let result_po = core::comm::Port::<ConnAttempt>();
    let closed_signal_po = core::comm::Port::<()>();
    let conn_data = {
        result_ch: core::comm::Chan(result_po),
        closed_signal_ch: core::comm::Chan(closed_signal_po)
    };
    let conn_data_ptr = ptr::addr_of(conn_data);
    let reader_po = core::comm::Port::<result::Result<~[u8], TcpErrData>>();
    let stream_handle_ptr = malloc_uv_tcp_t();
    *(stream_handle_ptr as *mut uv::ll::uv_tcp_t) = uv::ll::tcp_t();
    let socket_data = @{
        reader_po: reader_po,
        reader_ch: core::comm::Chan(reader_po),
        stream_handle_ptr: stream_handle_ptr,
        connect_req: uv::ll::connect_t(),
        write_req: uv::ll::write_t(),
        iotask: iotask
    };
    let socket_data_ptr = ptr::addr_of(*socket_data);
    log(debug, fmt!("tcp_connect result_ch %?", conn_data.result_ch));
    // get an unsafe representation of our stream_handle_ptr that
    // we can send into the interact cb to be handled in libuv..
    log(debug, fmt!("stream_handle_ptr outside interact %?",
                    stream_handle_ptr));
    do iotask::interact(iotask) |move input_ip, loop_ptr| unsafe {
        log(debug, ~"in interact cb for tcp client connect..");
        log(debug, fmt!("stream_handle_ptr in interact %?",
                        stream_handle_ptr));
        match uv::ll::tcp_init(loop_ptr, stream_handle_ptr) {
            0i32 => {
                log(debug, ~"tcp_init successful");
                log(debug, ~"dealing w/ ipv4 connection..");
                let connect_req_ptr =
                    ptr::addr_of((*socket_data_ptr).connect_req);
                let addr_str = ip::format_addr(&input_ip);
                let connect_result = match input_ip {
                    ip::Ipv4(addr) => {
                        // have to "recreate" the sockaddr_in/6
                        // since the ip_addr discards the port
                        // info.. should probably add an additional
                        // rust type that actually is closer to
                        // what the libuv API expects (ip str + port num)
                        log(debug, fmt!("addr: %?", addr));
                        let in_addr = uv::ll::ip4_addr(addr_str, port as int);
                        uv::ll::tcp_connect(
                            connect_req_ptr,
                            stream_handle_ptr,
                            ptr::addr_of(in_addr),
                            tcp_connect_on_connect_cb)
                    }
                    ip::Ipv6(addr) => {
                        log(debug, fmt!("addr: %?", addr));
                        let in_addr = uv::ll::ip6_addr(addr_str, port as int);
                        uv::ll::tcp_connect6(
                            connect_req_ptr,
                            stream_handle_ptr,
                            ptr::addr_of(in_addr),
                            tcp_connect_on_connect_cb)
                    }
                };
                match connect_result {
                    0i32 => {
                        log(debug, ~"tcp_connect successful");
                        // reusable data that we'll have for the
                        // duration..
                        uv::ll::set_data_for_uv_handle(stream_handle_ptr,
                                                       socket_data_ptr as
                                                           *libc::c_void);
                        // just so the connect_cb can send the
                        // outcome..
                        uv::ll::set_data_for_req(connect_req_ptr,
                                                 conn_data_ptr);
                        log(debug, ~"leaving tcp_connect interact cb...");
                        // let tcp_connect_on_connect_cb send on
                        // the result_ch, now..
                    }
                    _ => {
                        // immediate connect failure.. probably a garbage
                        // ip or somesuch
                        let err_data = uv::ll::get_last_err_data(loop_ptr);
                        core::comm::send((*conn_data_ptr).result_ch,
                                         ConnFailure(err_data.to_tcp_err()));
                        uv::ll::set_data_for_uv_handle(stream_handle_ptr,
                                                       conn_data_ptr);
                        uv::ll::close(stream_handle_ptr, stream_error_close_cb);
                    }
                }
            }
            _ => {
                // failure to create a tcp handle
                let err_data = uv::ll::get_last_err_data(loop_ptr);
                core::comm::send((*conn_data_ptr).result_ch,
                                 ConnFailure(err_data.to_tcp_err()));
            }
        }
    };
    match core::comm::recv(result_po) {
        ConnSuccess => {
            log(debug, ~"tcp::connect - received success on result_po");
            result::Ok(TcpSocket(socket_data))
        }
        ConnFailure(err_data) => {
            core::comm::recv(closed_signal_po);
            log(debug, ~"tcp::connect - received failure on result_po");
            // still have to free the malloc'd stream handle..
            rustrt::rust_uv_current_kernel_free(stream_handle_ptr
                                                as *libc::c_void);
            let tcp_conn_err = match err_data.err_name {
                ~"ECONNREFUSED" => ConnectionRefused,
                _ => GenericConnectErr(err_data.err_name, err_data.err_msg)
            };
            result::Err(tcp_conn_err)
        }
    }
}
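
// Example (illustrative sketch only, not part of this module's API): opening
// a connection and writing to it.  `uv::global_loop::get()` and
// `ip::v4::parse_addr` are assumed helpers for obtaining an IoTask and an
// IpAddr; substitute whatever address/loop sources your program uses.
//
//     let iotask = uv::global_loop::get();
//     let remote_ip = ip::v4::parse_addr(~"127.0.0.1");
//     match connect(remote_ip, 8080u, iotask) {
//       result::Ok(sock) => {
//         // connection is open; send a request over it
//         assert write(&sock, str::to_bytes(~"ping")).is_ok();
//       }
//       result::Err(ConnectionRefused) => log(debug, ~"remote host refused"),
//       result::Err(GenericConnectErr(name, msg)) =>
//         log(debug, fmt!("connect error: %s %s", name, msg))
//     }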

/**
 * Write binary data to a tcp stream; Blocks until operation completes
 *
 * # Arguments
 *
 * * sock - a `tcp_socket` to write to
 * * raw_write_data - a vector of `~[u8]` that will be written to the stream.
 *   This value must remain valid for the duration of the `write` call
 *
 * # Returns
 *
 * A `result` object with a `nil` value as the `ok` variant, or a
 * `tcp_err_data` value as the `err` variant
 */
fn write(sock: &TcpSocket, raw_write_data: ~[u8])
    -> result::Result<(), TcpErrData> unsafe {
    let socket_data_ptr = ptr::addr_of(*(sock.socket_data));
    write_common_impl(socket_data_ptr, raw_write_data)
}

/**
 * Write binary data to tcp stream; Returns a `future::future` value
 * immediately
 *
 * # Safety
 *
 * This function can produce unsafe results if:
 *
 * 1. the call to `write_future` is made
 * 2. the `future::future` value returned is never resolved via
 *    `future::get`
 * 3. and then the `tcp_socket` passed in to `write_future` leaves
 *    scope and is destructed before the task that runs the libuv write
 *    operation completes.
 *
 * As such: If using `write_future`, always be sure to resolve the returned
 * `future` so as to ensure libuv doesn't try to access a released write
 * handle. Otherwise, use the blocking `tcp::write` function instead.
 *
 * # Arguments
 *
 * * sock - a `tcp_socket` to write to
 * * raw_write_data - a vector of `~[u8]` that will be written to the stream.
 *   This value must remain valid for the duration of the `write` call
 *
 * # Returns
 *
 * A `future` value that, once the `write` operation completes, resolves to a
 * `result` object with a `nil` value as the `ok` variant, or a `tcp_err_data`
 * value as the `err` variant
 */
fn write_future(sock: &TcpSocket, raw_write_data: ~[u8])
    -> future::Future<result::Result<(), TcpErrData>> unsafe {
    let socket_data_ptr = ptr::addr_of(*(sock.socket_data));
    do future_spawn {
        let data_copy = copy(raw_write_data);
        write_common_impl(socket_data_ptr, data_copy)
    }
}
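
// Example (illustrative sketch): because of the safety note above, always
// resolve the returned future before the socket can be destructed.  `sock`
// is assumed to be an already-connected TcpSocket.
//
//     let wr_fut = write_future(&sock, str::to_bytes(~"hello"));
//     // ... other work can proceed while the write is in flight ...
//     let wr_res = future::get(&wr_fut);
//     if wr_res.is_err() {
//       log(debug, fmt!("write failed: %s", wr_res.get_err().err_msg));
//     }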

/**
 * Begin reading binary data from an open TCP connection; used with
 * `read_stop`
 *
 * # Arguments
 *
 * * sock -- a `net::tcp::tcp_socket` for the connection to read from
 *
 * # Returns
 *
 * * A `result` instance that will either contain a
 *   `core::comm::port<tcp_read_result>` that the user can read (and
 *   optionally, loop on) from until `read_stop` is called, or a
 *   `tcp_err_data` record
 */
fn read_start(sock: &TcpSocket)
    -> result::Result<comm::Port<
        result::Result<~[u8], TcpErrData>>, TcpErrData> unsafe {
    let socket_data = ptr::addr_of(*(sock.socket_data));
    read_start_common_impl(socket_data)
}

/**
 * Stop reading from an open TCP connection; used with `read_start`
 *
 * # Arguments
 *
 * * `sock` - a `net::tcp::tcp_socket` that you wish to stop reading on
 */
fn read_stop(sock: &TcpSocket,
             +read_port: comm::Port<result::Result<~[u8], TcpErrData>>) ->
    result::Result<(), TcpErrData> unsafe {
    log(debug, fmt!("taking the read_port out of commission %?", read_port));
    let socket_data = ptr::addr_of(*sock.socket_data);
    read_stop_common_impl(socket_data)
}
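
// Example (illustrative sketch) of the read_start/read_stop pairing; `sock`
// is assumed to be an already-connected TcpSocket.
//
//     let read_po = result::unwrap(read_start(&sock));
//     loop {
//       match core::comm::recv(read_po) {
//         result::Ok(new_data) => { /* consume new_data */ }
//         result::Err(_) => break       // EOF or error; stop reading
//       }
//     }
//     read_stop(&sock, move read_po);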

/**
 * Reads a single chunk of data from `tcp_socket`; block until data/error
 * recv'd
 *
 * Does a blocking read operation for a single chunk of data from a
 * `tcp_socket` until data arrives or an error is received. The provided
 * `timeout_msecs` value is used to raise an error if the timeout period
 * passes without any data received.
 *
 * # Arguments
 *
 * * `sock` - a `net::tcp::tcp_socket` that you wish to read from
 * * `timeout_msecs` - a `uint` value, in msecs, to wait before dropping the
 *   read attempt. Pass `0u` to wait indefinitely
 */
fn read(sock: &TcpSocket, timeout_msecs: uint)
    -> result::Result<~[u8],TcpErrData> {
    let socket_data = ptr::addr_of(*(sock.socket_data));
    read_common_impl(socket_data, timeout_msecs)
}
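
// Example (illustrative sketch): a single blocking read with a two-second
// timeout on an already-connected `sock`.
//
//     match read(&sock, 2000u) {
//       result::Ok(bytes) => log(debug, fmt!("read %u bytes", vec::len(bytes))),
//       result::Err(err) => log(debug, fmt!("read error: %s", err.err_msg))
//     }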

/**
 * Reads a single chunk of data; returns a `future::future<~[u8]>`
 * immediately
 *
 * Does a non-blocking read operation for a single chunk of data from a
 * `tcp_socket` and immediately returns a `future` value representing the
 * result. When resolving the returned `future`, it will block until data
 * arrives or an error is received. The provided `timeout_msecs`
 * value is used to raise an error if the timeout period passes without any
 * data received.
 *
 * # Safety
 *
 * This function can produce unsafe results if the call to `read_future` is
 * made, the `future::future` value returned is never resolved via
 * `future::get`, and then the `tcp_socket` passed in to `read_future` leaves
 * scope and is destructed before the task that runs the libuv read
 * operation completes.
 *
 * As such: If using `read_future`, always be sure to resolve the returned
 * `future` so as to ensure libuv doesn't try to access a released read
 * handle. Otherwise, use the blocking `tcp::read` function instead.
 *
 * # Arguments
 *
 * * `sock` - a `net::tcp::tcp_socket` that you wish to read from
 * * `timeout_msecs` - a `uint` value, in msecs, to wait before dropping the
 *   read attempt. Pass `0u` to wait indefinitely
 */
fn read_future(sock: &TcpSocket, timeout_msecs: uint)
    -> future::Future<result::Result<~[u8],TcpErrData>> {
    let socket_data = ptr::addr_of(*(sock.socket_data));
    do future_spawn {
        read_common_impl(socket_data, timeout_msecs)
    }
}

/**
 * Bind an incoming client connection to a `net::tcp::tcp_socket`
 *
 * # Notes
 *
 * It is safe to call `net::tcp::accept` _only_ within the context of the
 * `new_connect_cb` callback provided as the final argument to the
 * `net::tcp::listen` function.
 *
 * The `new_conn` opaque value is provided _only_ as the first argument to the
 * `new_connect_cb` provided as a part of `net::tcp::listen`.
 * It can be safely sent to another task but it _must_ be
 * used (via `net::tcp::accept`) before the `new_connect_cb` call it was
 * provided to returns.
 *
 * This implies that a port/chan pair must be used to make sure that the
 * `new_connect_cb` call blocks until an attempt to create a
 * `net::tcp::tcp_socket` is completed.
 *
 * # Example
 *
 * Here, the `new_conn` is used in conjunction with `accept` from within
 * a task spawned by the `new_connect_cb` passed into `listen`
 *
 * ~~~~~~~~~~~
 * net::tcp::listen(remote_ip, remote_port, backlog)
 *     // this callback is run once after the connection is successfully
 *     // set up
 *     {|kill_ch|
 *       // pass the kill_ch to your main loop or wherever you want
 *       // to be able to externally kill the server from
 *     }
 *     // this callback is run when a new connection arrives
 *     {|new_conn, kill_ch|
 *       let cont_po = core::comm::port::<option<tcp_err_data>>();
 *       let cont_ch = core::comm::chan(cont_po);
 *       task::spawn {||
 *           let accept_result = net::tcp::accept(new_conn);
 *           if accept_result.is_err() {
 *               core::comm::send(cont_ch, result::get_err(accept_result));
 *               // fail?
 *           }
 *           else {
 *               let sock = result::get(accept_result);
 *               core::comm::send(cont_ch, true);
 *               // do work here
 *           }
 *       };
 *       match core::comm::recv(cont_po) {
 *         // shut down listen()
 *         some(err_data) => core::comm::send(kill_ch, some(err_data)),
 *         // wait for next connection
 *         none => ()
 *       }
 *     };
 * ~~~~~~~~~~~
 *
 * # Arguments
 *
 * * `new_conn` - an opaque value used to create a new `tcp_socket`
 *
 * # Returns
 *
 * On success, this function will return a `net::tcp::tcp_socket` as the
 * `ok` variant of a `result`. The `net::tcp::tcp_socket` is anchored within
 * the task that `accept` was called within for its lifetime. On failure,
 * this function will return a `net::tcp::tcp_err_data` record
 * as the `err` variant of a `result`.
 */
fn accept(new_conn: TcpNewConnection)
    -> result::Result<TcpSocket, TcpErrData> unsafe {

    match new_conn {
        NewTcpConn(server_handle_ptr) => {
            let server_data_ptr = uv::ll::get_data_for_uv_handle(
                server_handle_ptr) as *TcpListenFcData;
            let reader_po = core::comm::Port();
            let iotask = (*server_data_ptr).iotask;
            let stream_handle_ptr = malloc_uv_tcp_t();
            *(stream_handle_ptr as *mut uv::ll::uv_tcp_t) = uv::ll::tcp_t();
            let client_socket_data = @{
                reader_po: reader_po,
                reader_ch: core::comm::Chan(reader_po),
                stream_handle_ptr : stream_handle_ptr,
                connect_req : uv::ll::connect_t(),
                write_req : uv::ll::write_t(),
                iotask : iotask
            };
            let client_socket_data_ptr = ptr::addr_of(*client_socket_data);
            let client_stream_handle_ptr =
                (*client_socket_data_ptr).stream_handle_ptr;

            let result_po = core::comm::Port::<Option<TcpErrData>>();
            let result_ch = core::comm::Chan(result_po);

            // UNSAFE LIBUV INTERACTION BEGIN
            // .. normally this happens within the context of
            // a call to uv::hl::interact.. but we're breaking
            // the rules here because this always has to be
            // called within the context of a listen() new_connect_cb
            // callback (or it will likely fail and drown your cat)
            log(debug, ~"in interact cb for tcp::accept");
            let loop_ptr = uv::ll::get_loop_for_uv_handle(
                server_handle_ptr);
            match uv::ll::tcp_init(loop_ptr, client_stream_handle_ptr) {
                0i32 => {
                    log(debug, ~"uv_tcp_init successful for client stream");
                    match uv::ll::accept(
                        server_handle_ptr as *libc::c_void,
                        client_stream_handle_ptr as *libc::c_void) {
                        0i32 => {
                            log(debug,
                                ~"successfully accepted client connection");
                            uv::ll::set_data_for_uv_handle(
                                client_stream_handle_ptr,
                                client_socket_data_ptr
                                    as *libc::c_void);
                            core::comm::send(result_ch, None);
                        }
                        _ => {
                            log(debug, ~"failed to accept client conn");
                            core::comm::send(result_ch, Some(
                                uv::ll::get_last_err_data(loop_ptr)
                                    .to_tcp_err()));
                        }
                    }
                }
                _ => {
                    log(debug, ~"failed to init client stream");
                    core::comm::send(result_ch, Some(
                        uv::ll::get_last_err_data(loop_ptr).to_tcp_err()));
                }
            }
            // UNSAFE LIBUV INTERACTION END
            match core::comm::recv(result_po) {
                Some(err_data) => result::Err(err_data),
                None => result::Ok(TcpSocket(client_socket_data))
            }
        }
    }
}

/**
 * Bind to a given IP/port and listen for new connections
 *
 * # Arguments
 *
 * * `host_ip` - a `net::ip::ip_addr` representing a unique IP
 *   (versions 4 or 6)
 * * `port` - a uint representing the port to listen on
 * * `backlog` - a uint representing the number of incoming connections
 *   to cache in memory
 * * `iotask` - a `uv::iotask` that the tcp request will run on
 * * `on_establish_cb` - a callback that is evaluated if/when the listener
 *   is successfully established. it takes no parameters
 * * `new_connect_cb` - a callback to be evaluated, on the libuv thread,
 *   whenever a client attempts to connect on the provided ip/port. the
 *   callback's arguments are:
 *     * `new_conn` - an opaque type that can be passed to
 *       `net::tcp::accept` in order to be converted to a `tcp_socket`.
 *     * `kill_ch` - channel of type `core::comm::chan<option<tcp_err_data>>`.
 *       this channel can be used to send a message to cause `listen` to begin
 *       closing the underlying libuv data structures.
 *
 * # Returns
 *
 * a `result` instance containing empty data of type `()` on a
 * successful/normal shutdown, and a `tcp_listen_err_data` enum in the event
 * of listen exiting because of an error
 */
fn listen(+host_ip: ip::IpAddr, port: uint, backlog: uint,
          iotask: IoTask,
          +on_establish_cb: fn~(comm::Chan<Option<TcpErrData>>),
          +new_connect_cb: fn~(TcpNewConnection,
                               comm::Chan<Option<TcpErrData>>))
    -> result::Result<(), TcpListenErrData> unsafe {
    do listen_common(move host_ip, port, backlog, iotask, on_establish_cb)
        // on_connect_cb
        |move new_connect_cb, handle| unsafe {
        let server_data_ptr = uv::ll::get_data_for_uv_handle(handle)
            as *TcpListenFcData;
        let new_conn = NewTcpConn(handle);
        let kill_ch = (*server_data_ptr).kill_ch;
        new_connect_cb(new_conn, kill_ch);
    }
}
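
// Example (illustrative sketch, adapted from the `accept` docs above): a
// minimal server that accepts each connection in a fresh task and greets it.
// `server_ip` and `iotask` are assumed to be obtained as in the `connect`
// example.
//
//     do listen(server_ip, 8080u, 128u, iotask,
//               // on_establish_cb -- hold on to kill_ch if the server must
//               // be stoppable from elsewhere
//               |_kill_ch| log(debug, ~"server is listening"))
//         // new_connect_cb -- runs on the libuv thread for every attempt
//         |new_conn, kill_ch| {
//         let cont_po = core::comm::Port::<Option<TcpErrData>>();
//         let cont_ch = core::comm::Chan(cont_po);
//         do task::spawn {
//             match accept(new_conn) {
//               result::Ok(sock) => {
//                 core::comm::send(cont_ch, None);
//                 write(&sock, str::to_bytes(~"hello\n"));
//               }
//               result::Err(err_data) =>
//                 core::comm::send(cont_ch, Some(err_data))
//             }
//         };
//         // block the libuv thread until accept() has finished
//         match core::comm::recv(cont_po) {
//           Some(err_data) => core::comm::send(kill_ch, Some(err_data)),
//           None => ()
//         }
//     };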

fn listen_common(+host_ip: ip::IpAddr, port: uint, backlog: uint,
                 iotask: IoTask,
                 +on_establish_cb: fn~(comm::Chan<Option<TcpErrData>>),
                 +on_connect_cb: fn~(*uv::ll::uv_tcp_t))
    -> result::Result<(), TcpListenErrData> unsafe {
    let stream_closed_po = core::comm::Port::<()>();
    let kill_po = core::comm::Port::<Option<TcpErrData>>();
    let kill_ch = core::comm::Chan(kill_po);
    let server_stream = uv::ll::tcp_t();
    let server_stream_ptr = ptr::addr_of(server_stream);
    let server_data = {
        server_stream_ptr: server_stream_ptr,
        stream_closed_ch: core::comm::Chan(stream_closed_po),
        kill_ch: kill_ch,
        on_connect_cb: move on_connect_cb,
        iotask: iotask,
        mut active: true
    };
    let server_data_ptr = ptr::addr_of(server_data);

    let setup_result = do core::comm::listen |setup_ch| {
        // this is to address a compiler warning about
        // an implicit copy.. it seems that double nested
        // will defeat a move sigil, as is done to the host_ip
        // arg above.. this same pattern works w/o complaint in
        // tcp::connect (because the iotask::interact cb isn't
        // nested within a core::comm::listen block)
        let loc_ip = copy(host_ip);
        do iotask::interact(iotask) |move loc_ip, loop_ptr| unsafe {
            match uv::ll::tcp_init(loop_ptr, server_stream_ptr) {
                0i32 => {
                    uv::ll::set_data_for_uv_handle(
                        server_stream_ptr,
                        server_data_ptr);
                    let addr_str = ip::format_addr(&loc_ip);
                    let bind_result = match loc_ip {
                        ip::Ipv4(addr) => {
                            log(debug, fmt!("addr: %?", addr));
                            let in_addr = uv::ll::ip4_addr(addr_str,
                                                           port as int);
                            uv::ll::tcp_bind(server_stream_ptr,
                                             ptr::addr_of(in_addr))
                        }
                        ip::Ipv6(addr) => {
                            log(debug, fmt!("addr: %?", addr));
                            let in_addr = uv::ll::ip6_addr(addr_str,
                                                           port as int);
                            uv::ll::tcp_bind6(server_stream_ptr,
                                              ptr::addr_of(in_addr))
                        }
                    };
                    match bind_result {
                        0i32 => {
                            match uv::ll::listen(server_stream_ptr,
                                                 backlog as libc::c_int,
                                                 tcp_lfc_on_connection_cb) {
                                0i32 => core::comm::send(setup_ch, None),
                                _ => {
                                    log(debug, ~"failure to uv_listen()");
                                    let err_data =
                                        uv::ll::get_last_err_data(loop_ptr);
                                    core::comm::send(setup_ch,
                                                     Some(err_data));
                                }
                            }
                        }
                        _ => {
                            log(debug, ~"failure to uv_tcp_bind");
                            let err_data =
                                uv::ll::get_last_err_data(loop_ptr);
                            core::comm::send(setup_ch, Some(err_data));
                        }
                    }
                }
                _ => {
                    log(debug, ~"failure to uv_tcp_init");
                    let err_data = uv::ll::get_last_err_data(loop_ptr);
                    core::comm::send(setup_ch, Some(err_data));
                }
            }
        };
        setup_ch.recv()
    };
    match setup_result {
        Some(err_data) => {
            do iotask::interact(iotask) |loop_ptr| unsafe {
                log(debug, fmt!("tcp::listen post-kill recv hl interact %?",
                                loop_ptr));
                (*server_data_ptr).active = false;
                uv::ll::close(server_stream_ptr, tcp_lfc_close_cb);
            };
            stream_closed_po.recv();
            match err_data.err_name {
                ~"EACCES" => {
                    log(debug, ~"Got EACCES error");
                    result::Err(AccessDenied)
                }
                ~"EADDRINUSE" => {
                    log(debug, ~"Got EADDRINUSE error");
                    result::Err(AddressInUse)
                }
                _ => {
                    log(debug, fmt!("Got '%s' '%s' libuv error",
                                    err_data.err_name, err_data.err_msg));
                    result::Err(
                        GenericListenErr(err_data.err_name,
                                         err_data.err_msg))
                }
            }
        }
        None => {
            on_establish_cb(kill_ch);
            let kill_result = core::comm::recv(kill_po);
            do iotask::interact(iotask) |loop_ptr| unsafe {
                log(debug, fmt!("tcp::listen post-kill recv hl interact %?",
                                loop_ptr));
                (*server_data_ptr).active = false;
                uv::ll::close(server_stream_ptr, tcp_lfc_close_cb);
            };
            stream_closed_po.recv();
            match kill_result {
                // some failure post bind/listen
                Some(err_data) => result::Err(GenericListenErr(
                    err_data.err_name, err_data.err_msg)),
                // clean exit
                None => result::Ok(())
            }
        }
    }
}

/**
 * Convert a `net::tcp::tcp_socket` to a `net::tcp::tcp_socket_buf`.
 *
 * This function takes ownership of a `net::tcp::tcp_socket`, returning it
 * stored within a buffered wrapper, which can be converted to a `io::reader`
 * or `io::writer`
 *
 * # Arguments
 *
 * * `sock` -- a `net::tcp::tcp_socket` that you want to buffer
 *
 * # Returns
 *
 * A buffered wrapper that you can cast as an `io::reader` or `io::writer`
 */
fn socket_buf(+sock: TcpSocket) -> TcpSocketBuf {
    TcpSocketBuf(@{ sock: move sock, mut buf: ~[] })
}
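
// Example (illustrative sketch): buffering a connected socket so that it can
// be driven through the io::Reader/io::Writer traits.  `sock` is assumed to
// be an already-connected TcpSocket; `read_line` comes from io::ReaderUtil.
//
//     let buf_sock = socket_buf(move sock);   // takes ownership of `sock`
//     buf_sock.write(str::to_bytes(~"GET / HTTP/1.0\n\n"));
//     let line = buf_sock.read_line();
//     log(debug, fmt!("first line of reply: %s", line));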

/// Convenience methods extending `net::tcp::tcp_socket`
impl TcpSocket {
    fn read_start() -> result::Result<comm::Port<
        result::Result<~[u8], TcpErrData>>, TcpErrData> {
        read_start(&self)
    }
    fn read_stop(-read_port:
                 comm::Port<result::Result<~[u8], TcpErrData>>) ->
        result::Result<(), TcpErrData> {
        read_stop(&self, move read_port)
    }
    fn read(timeout_msecs: uint) ->
        result::Result<~[u8], TcpErrData> {
        read(&self, timeout_msecs)
    }
    fn read_future(timeout_msecs: uint) ->
        future::Future<result::Result<~[u8], TcpErrData>> {
        read_future(&self, timeout_msecs)
    }
    fn write(raw_write_data: ~[u8])
        -> result::Result<(), TcpErrData> {
        write(&self, raw_write_data)
    }
    fn write_future(raw_write_data: ~[u8])
        -> future::Future<result::Result<(), TcpErrData>> {
        write_future(&self, raw_write_data)
    }
}
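
// With these methods the free functions above can also be called in method
// position, e.g. (illustrative sketch):
//
//     sock.write(str::to_bytes(~"ping"));
//     let reply = sock.read(0u);      // blocks indefinitely for one chunk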

/// Implementation of `io::reader` trait for a buffered `net::tcp::tcp_socket`
impl TcpSocketBuf: io::Reader {
    fn read(buf: &[mut u8], len: uint) -> uint {
        // Loop until our buffer has enough data in it for us to read from.
        while self.data.buf.len() < len {
            let read_result = read(&self.data.sock, 0u);
            if read_result.is_err() {
                let err_data = read_result.get_err();

                if err_data.err_name == ~"EOF" {
                    break;
                } else {
                    debug!("ERROR sock_buf as io::reader.read err %? %?",
                           err_data.err_name, err_data.err_msg);

                    return 0;
                }
            }
            else {
                vec::push_all(self.data.buf, result::unwrap(read_result));
            }
        }

        let count = uint::min(len, self.data.buf.len());

        let mut data = ~[];
        self.data.buf <-> data;

        vec::bytes::memcpy(buf, vec::view(data, 0, data.len()), count);

        vec::push_all(self.data.buf, vec::view(data, count, data.len()));

        count
    }
    fn read_byte() -> int {
        let mut bytes = ~[0];
        if self.read(bytes, 1u) == 0 { fail } else { bytes[0] as int }
    }
    fn unread_byte(amt: int) {
        vec::unshift((*(self.data)).buf, amt as u8);
    }
    fn eof() -> bool {
        false // noop
    }
    fn seek(dist: int, seek: io::SeekStyle) {
        log(debug, fmt!("tcp_socket_buf seek stub %? %?", dist, seek));
        // noop
    }
    fn tell() -> uint {
        0u // noop
    }
}

/// Implementation of `io::writer` trait for a buffered `net::tcp::tcp_socket`
impl TcpSocketBuf: io::Writer {
    fn write(data: &[const u8]) unsafe {
        let socket_data_ptr =
            ptr::addr_of(*((*(self.data)).sock).socket_data);
        let w_result = write_common_impl(socket_data_ptr,
                                         vec::slice(data, 0, vec::len(data)));
        if w_result.is_err() {
            let err_data = w_result.get_err();
            log(debug, fmt!("ERROR sock_buf as io::writer.writer err: %? %?",
                            err_data.err_name, err_data.err_msg));
        }
    }
    fn seek(dist: int, seek: io::SeekStyle) {
        log(debug, fmt!("tcp_socket_buf seek stub %? %?", dist, seek));
        // noop
    }
    fn tell() -> uint {
        0u
    }
    fn flush() -> int {
        0
    }
    fn get_type() -> io::WriterType {
        io::File
    }
}
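
// Usage sketch (added commentary, not part of the original sources): wrapping
// an established socket with the exported `socket_buf` constructor yields a
// TcpSocketBuf, which provides the io::Reader and io::Writer impls above.
// The `sock` binding and the exact constructor signature are assumptions.
//
//     let buf_sock = socket_buf(sock);
//     buf_sock.write(~[0x70u8, 0x6fu8, 0x6eu8, 0x67u8]);   // "pong"
//     let first_byte = buf_sock.read_byte();               // blocks for data
//     log(debug, fmt!("got byte %d", first_byte));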

// INTERNAL API

fn tear_down_socket_data(socket_data: @TcpSocketData) unsafe {
    let closed_po = core::comm::Port::<()>();
    let closed_ch = core::comm::Chan(closed_po);
    let close_data = {
        closed_ch: closed_ch
    };
    let close_data_ptr = ptr::addr_of(close_data);
    let stream_handle_ptr = (*socket_data).stream_handle_ptr;
    do iotask::interact((*socket_data).iotask) |loop_ptr| unsafe {
        log(debug, fmt!("interact dtor for tcp_socket stream %? loop %?",
            stream_handle_ptr, loop_ptr));
        uv::ll::set_data_for_uv_handle(stream_handle_ptr,
                                       close_data_ptr);
        uv::ll::close(stream_handle_ptr, tcp_socket_dtor_close_cb);
    };
    core::comm::recv(closed_po);
    log(debug, fmt!("about to free socket_data at %?", socket_data));
    rustrt::rust_uv_current_kernel_free(stream_handle_ptr
                                        as *libc::c_void);
    log(debug, ~"exiting dtor for tcp_socket");
}
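
// Pattern note (added commentary, not part of the original sources): the
// internal API repeatedly uses the same handshake with the libuv thread. A
// port/chan pair is created on the calling task, the chan is captured by the
// closure handed to iotask::interact (which runs on the uv loop), and the
// caller blocks on core::comm::recv until the uv side reports back. A
// minimal sketch of the shape, with a placeholder `some_iotask`:
//
//     let done_po = core::comm::Port::<()>();
//     let done_ch = core::comm::Chan(done_po);
//     do iotask::interact(some_iotask) |loop_ptr| unsafe {
//         // ... issue uv_* calls against loop_ptr here ...
//         core::comm::send(done_ch, ());
//     };
//     core::comm::recv(done_po);   // block until the uv side answers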

// shared implementation for tcp::read
fn read_common_impl(socket_data: *TcpSocketData, timeout_msecs: uint)
    -> result::Result<~[u8],TcpErrData> unsafe {
    log(debug, ~"starting tcp::read");
    let iotask = (*socket_data).iotask;
    let rs_result = read_start_common_impl(socket_data);
    if result::is_err(&rs_result) {
        let err_data = result::get_err(&rs_result);
        result::Err(err_data)
    }
    else {
        log(debug, ~"tcp::read before recv_timeout");
        let read_result = if timeout_msecs > 0u {
            timer::recv_timeout(
                iotask, timeout_msecs, result::get(&rs_result))
        } else {
            Some(core::comm::recv(result::get(&rs_result)))
        };
        log(debug, ~"tcp::read after recv_timeout");
        match read_result {
            None => {
                log(debug, ~"tcp::read: timed out..");
                let err_data = {
                    err_name: ~"TIMEOUT",
                    err_msg: ~"req timed out"
                };
                read_stop_common_impl(socket_data);
                result::Err(err_data)
            }
            Some(data_result) => {
                log(debug, ~"tcp::read got data");
                read_stop_common_impl(socket_data);
                data_result
            }
        }
    }
}
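
// Timeout semantics (added commentary, not part of the original sources): a
// `timeout_msecs` of 0 makes read_common_impl block on the reader port until
// data or an error arrives; any positive value goes through
// timer::recv_timeout and is reported as a TcpErrData with err_name
// "TIMEOUT" if nothing arrives in time. Sketch of a caller observing that,
// with an assumed `sock_data` pointer:
//
//     match read_common_impl(sock_data, 1000u) {
//         result::Ok(bytes) => log(debug, fmt!("read %u bytes", bytes.len())),
//         result::Err(err) => if err.err_name == ~"TIMEOUT" {
//             log(debug, ~"no data within 1s");
//         } else {
//             log(debug, fmt!("read error: %?", err.err_msg));
//         }
//     }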

// shared impl for read_stop
fn read_stop_common_impl(socket_data: *TcpSocketData) ->
    result::Result<(), TcpErrData> unsafe {
    let stream_handle_ptr = (*socket_data).stream_handle_ptr;
    let stop_po = core::comm::Port::<Option<TcpErrData>>();
    let stop_ch = core::comm::Chan(stop_po);
    do iotask::interact((*socket_data).iotask) |loop_ptr| unsafe {
        log(debug, ~"in interact cb for tcp::read_stop");
        match uv::ll::read_stop(stream_handle_ptr as *uv::ll::uv_stream_t) {
            0i32 => {
                log(debug, ~"successfully called uv_read_stop");
                core::comm::send(stop_ch, None);
            }
            _ => {
                log(debug, ~"failure in calling uv_read_stop");
                let err_data = uv::ll::get_last_err_data(loop_ptr);
                core::comm::send(stop_ch, Some(err_data.to_tcp_err()));
            }
        }
    };
    match core::comm::recv(stop_po) {
        Some(err_data) => result::Err(err_data.to_tcp_err()),
        None => result::Ok(())
    }
}

// shared impl for read_start
fn read_start_common_impl(socket_data: *TcpSocketData)
    -> result::Result<comm::Port<
        result::Result<~[u8], TcpErrData>>, TcpErrData> unsafe {
    let stream_handle_ptr = (*socket_data).stream_handle_ptr;
    let start_po = core::comm::Port::<Option<uv::ll::uv_err_data>>();
    let start_ch = core::comm::Chan(start_po);
    log(debug, ~"in tcp::read_start before interact loop");
    do iotask::interact((*socket_data).iotask) |loop_ptr| unsafe {
        log(debug, fmt!("in tcp::read_start interact cb %?", loop_ptr));
        match uv::ll::read_start(stream_handle_ptr as *uv::ll::uv_stream_t,
                                 on_alloc_cb,
                                 on_tcp_read_cb) {
            0i32 => {
                log(debug, ~"success doing uv_read_start");
                core::comm::send(start_ch, None);
            }
            _ => {
                log(debug, ~"error attempting uv_read_start");
                let err_data = uv::ll::get_last_err_data(loop_ptr);
                core::comm::send(start_ch, Some(err_data));
            }
        }
    };
    match core::comm::recv(start_po) {
        Some(err_data) => result::Err(err_data.to_tcp_err()),
        None => result::Ok((*socket_data).reader_po)
    }
}

// helper to convert a "class" vector of [u8] to a *[uv::ll::uv_buf_t]

// shared implementation used by write and write_future
fn write_common_impl(socket_data_ptr: *TcpSocketData,
                     raw_write_data: ~[u8])
    -> result::Result<(), TcpErrData> unsafe {
    let write_req_ptr = ptr::addr_of((*socket_data_ptr).write_req);
    let stream_handle_ptr =
        (*socket_data_ptr).stream_handle_ptr;
    let write_buf_vec = ~[ uv::ll::buf_init(
        vec::raw::to_ptr(raw_write_data),
        vec::len(raw_write_data)) ];
    let write_buf_vec_ptr = ptr::addr_of(write_buf_vec);
    let result_po = core::comm::Port::<TcpWriteResult>();
    let write_data = {
        result_ch: core::comm::Chan(result_po)
    };
    let write_data_ptr = ptr::addr_of(write_data);
    do iotask::interact((*socket_data_ptr).iotask) |loop_ptr| unsafe {
        log(debug, fmt!("in interact cb for tcp::write %?", loop_ptr));
        match uv::ll::write(write_req_ptr,
                            stream_handle_ptr,
                            write_buf_vec_ptr,
                            tcp_write_complete_cb) {
            0i32 => {
                log(debug, ~"uv_write() invoked successfully");
                uv::ll::set_data_for_req(write_req_ptr, write_data_ptr);
            }
            _ => {
                log(debug, ~"error invoking uv_write()");
                let err_data = uv::ll::get_last_err_data(loop_ptr);
                core::comm::send((*write_data_ptr).result_ch,
                                 TcpWriteError(err_data.to_tcp_err()));
            }
        }
    };
    // FIXME (#2656): Instead of passing unsafe pointers to local data,
    // and waiting here for the write to complete, we should transfer
    // ownership of everything to the I/O task and let it deal with the
    // aftermath, so we don't have to sit here blocking.
    match core::comm::recv(result_po) {
        TcpWriteSuccess => result::Ok(()),
        TcpWriteError(err_data) => result::Err(err_data.to_tcp_err())
    }
}
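
// Flow note (added commentary, not part of the original sources): a write is
// a round trip through the uv loop. write_common_impl registers result_po,
// hands the uv_write_t to the loop via iotask::interact, and then blocks on
// recv; when libuv finishes, tcp_write_complete_cb (defined later in this
// file) reads the WriteReqData stashed on the request and sends either
// TcpWriteSuccess or TcpWriteError back on result_ch, which unblocks the
// recv above and becomes the result::Result the caller sees. A non-blocking
// wrapper would plausibly just run this on another task, e.g. via the
// future_spawn alias imported at the top of the file (sketch only, not
// necessarily the file's actual write_future implementation):
//
//     do future_spawn {
//         write_common_impl(socket_data_ptr, copy raw_write_data)
//     }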

enum TcpNewConnection {
    NewTcpConn(*uv::ll::uv_tcp_t)
}

type TcpListenFcData = {
    server_stream_ptr: *uv::ll::uv_tcp_t,
    stream_closed_ch: comm::Chan<()>,
    kill_ch: comm::Chan<Option<TcpErrData>>,
    on_connect_cb: fn~(*uv::ll::uv_tcp_t),
    iotask: IoTask,
    mut active: bool
};

extern fn tcp_lfc_close_cb(handle: *uv::ll::uv_tcp_t) unsafe {
    let server_data_ptr = uv::ll::get_data_for_uv_handle(
        handle) as *TcpListenFcData;
    core::comm::send((*server_data_ptr).stream_closed_ch, ());
}

extern fn tcp_lfc_on_connection_cb(handle: *uv::ll::uv_tcp_t,
                                   status: libc::c_int) unsafe {
    let server_data_ptr = uv::ll::get_data_for_uv_handle(handle)
        as *TcpListenFcData;
    let kill_ch = (*server_data_ptr).kill_ch;
    if (*server_data_ptr).active {
        match status {
          0i32 => (*server_data_ptr).on_connect_cb(handle),
          _ => {
            let loop_ptr = uv::ll::get_loop_for_uv_handle(handle);
            core::comm::send(kill_ch,
                             Some(uv::ll::get_last_err_data(loop_ptr)
                                  .to_tcp_err()));
            (*server_data_ptr).active = false;
          }
        }
    }
}
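
// State-machine note (added commentary, not part of the original sources):
// the listener's connection callback only dispatches to on_connect_cb while
// `active` is true. On the first libuv-reported error it sends the error
// down kill_ch and flips `active` to false, so later connection attempts on
// a dying listener are ignored instead of racing the teardown.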

fn malloc_uv_tcp_t() -> *uv::ll::uv_tcp_t unsafe {
    rustrt::rust_uv_current_kernel_malloc(
        rustrt::rust_uv_helper_uv_tcp_t_size()) as *uv::ll::uv_tcp_t
}

enum TcpConnectResult {
    TcpConnected(TcpSocket),
    TcpConnectError(TcpErrData)
}

enum TcpWriteResult {
    TcpWriteSuccess,
    TcpWriteError(TcpErrData)
}

enum TcpReadStartResult {
    TcpReadStartSuccess(comm::Port<TcpReadResult>),
    TcpReadStartError(TcpErrData)
}

enum TcpReadResult {
    TcpReadData(~[u8]),
    TcpReadDone,
    TcpReadErr(TcpErrData)
}

trait ToTcpErr {
    fn to_tcp_err() -> TcpErrData;
}

impl uv::ll::uv_err_data: ToTcpErr {
    fn to_tcp_err() -> TcpErrData {
        { err_name: self.err_name, err_msg: self.err_msg }
    }
}

extern fn on_tcp_read_cb(stream: *uv::ll::uv_stream_t,
                         nread: libc::ssize_t,
                         ++buf: uv::ll::uv_buf_t) unsafe {
    log(debug, fmt!("entering on_tcp_read_cb stream: %? nread: %?",
                    stream, nread));
    let loop_ptr = uv::ll::get_loop_for_uv_handle(stream);
    let socket_data_ptr = uv::ll::get_data_for_uv_handle(stream)
        as *TcpSocketData;
    match nread as int {
      // incoming err.. probably eof
      -1 => {
        let err_data = uv::ll::get_last_err_data(loop_ptr).to_tcp_err();
        log(debug, fmt!("on_tcp_read_cb: incoming err.. name %? msg %?",
                        err_data.err_name, err_data.err_msg));
        let reader_ch = (*socket_data_ptr).reader_ch;
        core::comm::send(reader_ch, result::Err(err_data));
      }
      // do nothing .. unneeded buf
      0 => (),
      // have data
      _ => {
        // we have data
        log(debug, fmt!("tcp on_read_cb nread: %d", nread as int));
        let reader_ch = (*socket_data_ptr).reader_ch;
        let buf_base = uv::ll::get_base_from_buf(buf);
        let new_bytes = vec::raw::from_buf(buf_base, nread as uint);
        core::comm::send(reader_ch, result::Ok(new_bytes));
      }
    }
    uv::ll::free_base_of_buf(buf);
    log(debug, ~"exiting on_tcp_read_cb");
}

extern fn on_alloc_cb(handle: *libc::c_void,
                      suggested_size: size_t)
    -> uv::ll::uv_buf_t unsafe {
    log(debug, ~"tcp read on_alloc_cb!");
    let char_ptr = uv::ll::malloc_buf_base_of(suggested_size);
    log(debug, fmt!("tcp read on_alloc_cb h: %? char_ptr: %u sugsize: %u",
                    handle,
                    char_ptr as uint,
                    suggested_size as uint));
    uv::ll::buf_init(char_ptr, suggested_size as uint)
}

type TcpSocketCloseData = {
    closed_ch: comm::Chan<()>
};

extern fn tcp_socket_dtor_close_cb(handle: *uv::ll::uv_tcp_t) unsafe {
    let data = uv::ll::get_data_for_uv_handle(handle)
        as *TcpSocketCloseData;
    let closed_ch = (*data).closed_ch;
    core::comm::send(closed_ch, ());
    log(debug, ~"tcp_socket_dtor_close_cb exiting..");
}

extern fn tcp_write_complete_cb(write_req: *uv::ll::uv_write_t,
                                status: libc::c_int) unsafe {
    let write_data_ptr = uv::ll::get_data_for_req(write_req)
        as *WriteReqData;
    if status == 0i32 {
        log(debug, ~"successful write complete");
        core::comm::send((*write_data_ptr).result_ch, TcpWriteSuccess);
    } else {
        let stream_handle_ptr = uv::ll::get_stream_handle_from_write_req(
            write_req);
        let loop_ptr = uv::ll::get_loop_for_uv_handle(stream_handle_ptr);
        let err_data = uv::ll::get_last_err_data(loop_ptr);
        log(debug, ~"failure to write");
        core::comm::send((*write_data_ptr).result_ch,
                         TcpWriteError(err_data));
    }
}

type WriteReqData = {
    result_ch: comm::Chan<TcpWriteResult>
};

type ConnectReqData = {
    result_ch: comm::Chan<ConnAttempt>,
    closed_signal_ch: comm::Chan<()>
};

extern fn stream_error_close_cb(handle: *uv::ll::uv_tcp_t) unsafe {
    let data = uv::ll::get_data_for_uv_handle(handle) as
        *ConnectReqData;
    core::comm::send((*data).closed_signal_ch, ());
    log(debug, fmt!("exiting stream_error_close_cb for %?", handle));
}

extern fn tcp_connect_close_cb(handle: *uv::ll::uv_tcp_t) unsafe {
    log(debug, fmt!("closed client tcp handle %?", handle));
}

extern fn tcp_connect_on_connect_cb(connect_req_ptr: *uv::ll::uv_connect_t,
                                    status: libc::c_int) unsafe {
    let conn_data_ptr = (uv::ll::get_data_for_req(connect_req_ptr)
                         as *ConnectReqData);
    let result_ch = (*conn_data_ptr).result_ch;
    log(debug, fmt!("tcp_connect result_ch %?", result_ch));
    let tcp_stream_ptr =
        uv::ll::get_stream_handle_from_connect_req(connect_req_ptr);
    match status {
      0i32 => {
        log(debug, ~"successful tcp connection!");
        core::comm::send(result_ch, ConnSuccess);
      }
      _ => {
        log(debug, ~"error in tcp_connect_on_connect_cb");
        let loop_ptr = uv::ll::get_loop_for_uv_handle(tcp_stream_ptr);
        let err_data = uv::ll::get_last_err_data(loop_ptr);
        log(debug, fmt!("err_data %? %?", err_data.err_name,
                        err_data.err_msg));
        core::comm::send(result_ch, ConnFailure(err_data));
        uv::ll::set_data_for_uv_handle(tcp_stream_ptr,
                                       conn_data_ptr);
        uv::ll::close(tcp_stream_ptr, stream_error_close_cb);
      }
    }
    log(debug, ~"leaving tcp_connect_on_connect_cb");
}

enum ConnAttempt {
    ConnSuccess,
    ConnFailure(uv::ll::uv_err_data)
}

type TcpSocketData = {
    reader_po: comm::Port<result::Result<~[u8], TcpErrData>>,
    reader_ch: comm::Chan<result::Result<~[u8], TcpErrData>>,
    stream_handle_ptr: *uv::ll::uv_tcp_t,
    connect_req: uv::ll::uv_connect_t,
    write_req: uv::ll::uv_write_t,
    iotask: IoTask
};

type TcpBufferedSocketData = {
    sock: TcpSocket,
    mut buf: ~[u8]
};

//#[cfg(test)]
mod test {
    #[legacy_exports];
    // FIXME don't run on fbsd or linux 32 bit (#2064)
    #[cfg(target_os="win32")]
    #[cfg(target_os="darwin")]
    #[cfg(target_os="linux")]
    mod tcp_ipv4_server_and_client_test {
        #[legacy_exports];
        #[cfg(target_arch="x86_64")]
        mod impl64 {
            #[legacy_exports];
            #[test]
            fn test_gl_tcp_server_and_client_ipv4() unsafe {
                impl_gl_tcp_ipv4_server_and_client();
            }
            #[test]
            fn test_gl_tcp_ipv4_client_error_connection_refused() unsafe {
                impl_gl_tcp_ipv4_client_error_connection_refused();
            }
            #[test]
            fn test_gl_tcp_server_address_in_use() unsafe {
                impl_gl_tcp_ipv4_server_address_in_use();
            }
            #[test]
            fn test_gl_tcp_server_access_denied() unsafe {
                impl_gl_tcp_ipv4_server_access_denied();
            }
            #[test]
            fn test_gl_tcp_ipv4_server_client_reader_writer() {
                impl_gl_tcp_ipv4_server_client_reader_writer();
            }
        }
        #[cfg(target_arch="x86")]
        mod impl32 {
            #[legacy_exports];
            #[test]
            #[ignore(cfg(target_os = "linux"))]
            fn test_gl_tcp_server_and_client_ipv4() unsafe {
                impl_gl_tcp_ipv4_server_and_client();
            }
            #[test]
            #[ignore(cfg(target_os = "linux"))]
            fn test_gl_tcp_ipv4_client_error_connection_refused() unsafe {
                impl_gl_tcp_ipv4_client_error_connection_refused();
            }
            #[test]
            #[ignore(cfg(target_os = "linux"))]
            fn test_gl_tcp_server_address_in_use() unsafe {
                impl_gl_tcp_ipv4_server_address_in_use();
            }
            #[test]
            #[ignore(cfg(target_os = "linux"))]
            #[ignore(cfg(windows), reason = "deadlocking bots")]
            fn test_gl_tcp_server_access_denied() unsafe {
                impl_gl_tcp_ipv4_server_access_denied();
            }
            #[test]
            #[ignore(cfg(target_os = "linux"))]
            fn test_gl_tcp_ipv4_server_client_reader_writer() {
                impl_gl_tcp_ipv4_server_client_reader_writer();
            }
        }
    }

    fn impl_gl_tcp_ipv4_server_and_client() {
        let hl_loop = uv::global_loop::get();
        let server_ip = ~"127.0.0.1";
        let server_port = 8888u;
        let expected_req = ~"ping";
        let expected_resp = ~"pong";

        let server_result_po = core::comm::Port::<~str>();
        let server_result_ch = core::comm::Chan(server_result_po);

        let cont_po = core::comm::Port::<()>();
        let cont_ch = core::comm::Chan(cont_po);
        // server
        do task::spawn_sched(task::ManualThreads(1u)) {
            let actual_req = do comm::listen |server_ch| {
                run_tcp_test_server(
                    server_ip,
                    server_port,
                    expected_resp,
                    server_ch,
                    cont_ch,
                    hl_loop)
            };
            server_result_ch.send(actual_req);
        };
        core::comm::recv(cont_po);
        // client
        log(debug, ~"server started, firing up client..");
        let actual_resp_result = do core::comm::listen |client_ch| {
            run_tcp_test_client(
                server_ip,
                server_port,
                expected_req,
                client_ch,
                hl_loop)
        };
        assert actual_resp_result.is_ok();
        let actual_resp = actual_resp_result.get();
        let actual_req = core::comm::recv(server_result_po);
        log(debug, fmt!("REQ: expected: '%s' actual: '%s'",
                        expected_req, actual_req));
        log(debug, fmt!("RESP: expected: '%s' actual: '%s'",
                        expected_resp, actual_resp));
        assert str::contains(actual_req, expected_req);
        assert str::contains(actual_resp, expected_resp);
    }

    fn impl_gl_tcp_ipv4_client_error_connection_refused() {
        let hl_loop = uv::global_loop::get();
        let server_ip = ~"127.0.0.1";
        let server_port = 8889u;
        let expected_req = ~"ping";
        // client
        log(debug, ~"firing up client..");
        let actual_resp_result = do core::comm::listen |client_ch| {
            run_tcp_test_client(
                server_ip,
                server_port,
                expected_req,
                client_ch,
                hl_loop)
        };
        match actual_resp_result.get_err() {
          ConnectionRefused => (),
          _ => fail ~"unknown error.. expected connection_refused"
        }
    }
|
2012-06-05 08:40:39 -05:00
|
|
|
fn impl_gl_tcp_ipv4_server_address_in_use() {
|
|
|
|
let hl_loop = uv::global_loop::get();
|
2012-07-14 00:57:48 -05:00
|
|
|
let server_ip = ~"127.0.0.1";
|
2012-06-05 08:40:39 -05:00
|
|
|
let server_port = 8890u;
|
2012-07-14 00:57:48 -05:00
|
|
|
let expected_req = ~"ping";
|
|
|
|
let expected_resp = ~"pong";
|
std: splitting out tcp server API + tests
- we now have two interfaces for the TCP/IP server/listener workflow,
based on different user approaches surrounding how to deal with the
flow of accept a new tcp connection:
1. the "original" API closely mimics the low-level libuv API, in that we
have an on_connect_cb that the user provides *that is ran on the libuv
thread*. In this callback, the user can accept() a connection, turning it
into a tcp_socket.. of course, before accepting, they have the option
of passing it to a new task, provided they *make the cb block until
the accept is done* .. this is because, in libuv, you have to do the
uv_accept call in the span of that on_connect_cb callback that gets fired
when a new connection comes in. thems the breaks..
I wanted to just get rid of this API, because the general proposition of
users always running code on the libuv thread sounds like an invitation
for many future headaches. the API restriction to have to choose to
immediately accept a connection (and allow the user to block libuv as
needed) isn't too bad for power users who could conceive of circumstances
where they would drop an incoming TCP connection and know what they're
doing, in general.
but as a general API, I thought this was a bit cumbersome, so I ended up
devising..
2. an API that is initiated with a call to `net::tcp::new_listener()` ..
has a similar signature to `net::tcp::listen()`, except that is just
returns an object that sort of behaves like a `comm::port`. Users can
block on the `tcp_conn_port` to receive new connections, either in the
current task or in a new task, depending on which API route they take
(`net::tcp::conn_recv` or `net::tcp::conn_recv_spawn` respectively).. there
is also a `net::tcp::conn_peek` function that will do a peek on the
underlying port to see if there are pending connections.
The main difference, with this API, is that the low-level libuv glue is
going to *accept every connection attempt*, along with the overhead that
that brings. But, this is a much more hassle-free API for 95% of use
cases and will probably be the one that most users will want to reach for.
2012-05-17 15:27:08 -05:00
|
|
|
|
2012-08-27 16:22:25 -05:00
|
|
|
let server_result_po = core::comm::Port::<~str>();
|
|
|
|
let server_result_ch = core::comm::Chan(server_result_po);
|
std: splitting out tcp server API + tests
- we now have two interfaces for the TCP/IP server/listener workflow,
based on different user approaches surrounding how to deal with the
flow of accept a new tcp connection:
1. the "original" API closely mimics the low-level libuv API, in that we
have an on_connect_cb that the user provides *that is ran on the libuv
thread*. In this callback, the user can accept() a connection, turning it
into a tcp_socket.. of course, before accepting, they have the option
of passing it to a new task, provided they *make the cb block until
the accept is done* .. this is because, in libuv, you have to do the
uv_accept call in the span of that on_connect_cb callback that gets fired
when a new connection comes in. thems the breaks..
I wanted to just get rid of this API, because the general proposition of
users always running code on the libuv thread sounds like an invitation
for many future headaches. the API restriction to have to choose to
immediately accept a connection (and allow the user to block libuv as
needed) isn't too bad for power users who could conceive of circumstances
where they would drop an incoming TCP connection and know what they're
doing, in general.
but as a general API, I thought this was a bit cumbersome, so I ended up
devising..
2. an API that is initiated with a call to `net::tcp::new_listener()` ..
has a similar signature to `net::tcp::listen()`, except that is just
returns an object that sort of behaves like a `comm::port`. Users can
block on the `tcp_conn_port` to receive new connections, either in the
current task or in a new task, depending on which API route they take
(`net::tcp::conn_recv` or `net::tcp::conn_recv_spawn` respectively).. there
is also a `net::tcp::conn_peek` function that will do a peek on the
underlying port to see if there are pending connections.
The main difference, with this API, is that the low-level libuv glue is
going to *accept every connection attempt*, along with the overhead that
that brings. But, this is a much more hassle-free API for 95% of use
cases and will probably be the one that most users will want to reach for.
2012-05-17 15:27:08 -05:00
|
|
|
|
2012-08-27 16:22:25 -05:00
|
|
|
let cont_po = core::comm::Port::<()>();
|
|
|
|
let cont_ch = core::comm::Chan(cont_po);
|
std: splitting out tcp server API + tests
- we now have two interfaces for the TCP/IP server/listener workflow,
based on different user approaches surrounding how to deal with the
flow of accept a new tcp connection:
1. the "original" API closely mimics the low-level libuv API, in that we
have an on_connect_cb that the user provides *that is ran on the libuv
thread*. In this callback, the user can accept() a connection, turning it
into a tcp_socket.. of course, before accepting, they have the option
of passing it to a new task, provided they *make the cb block until
the accept is done* .. this is because, in libuv, you have to do the
uv_accept call in the span of that on_connect_cb callback that gets fired
when a new connection comes in. thems the breaks..
I wanted to just get rid of this API, because the general proposition of
users always running code on the libuv thread sounds like an invitation
for many future headaches. the API restriction to have to choose to
immediately accept a connection (and allow the user to block libuv as
needed) isn't too bad for power users who could conceive of circumstances
where they would drop an incoming TCP connection and know what they're
doing, in general.
but as a general API, I thought this was a bit cumbersome, so I ended up
devising..
2. an API that is initiated with a call to `net::tcp::new_listener()` ..
has a similar signature to `net::tcp::listen()`, except that is just
returns an object that sort of behaves like a `comm::port`. Users can
block on the `tcp_conn_port` to receive new connections, either in the
current task or in a new task, depending on which API route they take
(`net::tcp::conn_recv` or `net::tcp::conn_recv_spawn` respectively).. there
is also a `net::tcp::conn_peek` function that will do a peek on the
underlying port to see if there are pending connections.
The main difference, with this API, is that the low-level libuv glue is
going to *accept every connection attempt*, along with the overhead that
that brings. But, this is a much more hassle-free API for 95% of use
cases and will probably be the one that most users will want to reach for.
2012-05-17 15:27:08 -05:00
|
|
|
        // server
        do task::spawn_sched(task::ManualThreads(1u)) {
            let actual_req = do comm::listen |server_ch| {
                run_tcp_test_server(
                    server_ip,
                    server_port,
                    expected_resp,
                    server_ch,
                    cont_ch,
                    hl_loop)
            };
            server_result_ch.send(actual_req);
        };
        core::comm::recv(cont_po);
        // this one should fail..
        let listen_err = run_tcp_test_server_fail(
            server_ip,
            server_port,
            hl_loop);
        // client.. just doing this so that the first server tears down
        log(debug, ~"server started, firing up client..");
        do core::comm::listen |client_ch| {
            run_tcp_test_client(
                server_ip,
                server_port,
                expected_req,
                client_ch,
                hl_loop)
        };
        match listen_err {
            AddressInUse => {
                assert true;
            }
            _ => {
                fail ~"expected address_in_use listen error, "+
                    ~"but got a different error variant. check logs.";
            }
        }
    }
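    // Binds the privileged port 80 and expects an AccessDenied listen
    // error; this assumes the test is not run with the privileges needed
    // to bind that port.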
    fn impl_gl_tcp_ipv4_server_access_denied() {
        let hl_loop = uv::global_loop::get();
        let server_ip = ~"127.0.0.1";
        let server_port = 80u;
        // this one should fail..
        let listen_err = run_tcp_test_server_fail(
            server_ip,
            server_port,
            hl_loop);
        match listen_err {
            AccessDenied => {
                assert true;
            }
            _ => {
                fail ~"expected access_denied listen error, "+
                    ~"but got a different error variant. check logs.";
            }
        }
    }
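    // Round-trips a request/response through the buffered reader/writer
    // helpers below; the body is commented out because it triggered a
    // compiler ICE (see the XXX note inside).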
    fn impl_gl_tcp_ipv4_server_client_reader_writer() {
        /*
         XXX: Causes an ICE.

        let iotask = uv::global_loop::get();
        let server_ip = ~"127.0.0.1";
        let server_port = 8891u;
        let expected_req = ~"ping";
        let expected_resp = ~"pong";

        let server_result_po = core::comm::port::<~str>();
        let server_result_ch = core::comm::chan(server_result_po);

        let cont_po = core::comm::port::<()>();
        let cont_ch = core::comm::chan(cont_po);
        // server
        do task::spawn_sched(task::ManualThreads(1u)) {
            let actual_req = do comm::listen |server_ch| {
                run_tcp_test_server(
                    server_ip,
                    server_port,
                    expected_resp,
                    server_ch,
                    cont_ch,
                    iotask)
            };
            server_result_ch.send(actual_req);
        };
        core::comm::recv(cont_po);
        // client
        let server_addr = ip::v4::parse_addr(server_ip);
        let conn_result = connect(server_addr, server_port, iotask);
        if result::is_err(conn_result) {
            assert false;
        }
        let sock_buf = @socket_buf(result::unwrap(conn_result));
        buf_write(sock_buf, expected_req);

        // so contrived!
        let actual_resp = do str::as_bytes(expected_resp) |resp_buf| {
            buf_read(sock_buf, vec::len(resp_buf))
        };

        let actual_req = core::comm::recv(server_result_po);
        log(debug, fmt!("REQ: expected: '%s' actual: '%s'",
                        expected_req, actual_req));
        log(debug, fmt!("RESP: expected: '%s' actual: '%s'",
                        expected_resp, actual_resp));
        assert str::contains(actual_req, expected_req);
        assert str::contains(actual_resp, expected_resp);
        */
    }
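    // Write a string, as raw bytes, to any io::Writer -- e.g. the buffered
    // socket returned by socket_buf().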
    fn buf_write<W:io::Writer>(+w: &W, val: &str) {
        log(debug, fmt!("BUF_WRITE: val len %?", str::len(val)));
        do str::byte_slice(val) |b_slice| {
            log(debug, fmt!("BUF_WRITE: b_slice len %?",
                            vec::len(b_slice)));
            w.write(b_slice)
        }
    }
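    // Read exactly `len` bytes from any io::Reader and convert them to a
    // string.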
    fn buf_read<R:io::Reader>(+r: &R, len: uint) -> ~str {
        let new_bytes = (*r).read_bytes(len);
        log(debug, fmt!("in buf_read.. new_bytes len: %?",
                        vec::len(new_bytes)));
        str::from_bytes(new_bytes)
    }
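    // A minimal sketch of how the two helpers above pair with socket_buf()
    // on the client side (mirrors the commented-out reader/writer test;
    // `sock` is assumed to be a freshly connected TcpSocket):
    //
    //     let sock_buf = @socket_buf(move sock);
    //     buf_write(sock_buf, ~"ping");
    //     let reply = buf_read(sock_buf, 4u);

    // Test server: listens on the given ip/port, accepts a connection in a
    // worker task, reads the request, replies with `resp`, shuts the
    // listener down, and returns the request text it received.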
    fn run_tcp_test_server(server_ip: &str, server_port: uint, +resp: ~str,
                           server_ch: comm::Chan<~str>,
                           cont_ch: comm::Chan<()>,
                           iotask: IoTask) -> ~str {
        let server_ip_addr = ip::v4::parse_addr(server_ip);
        let listen_result = listen(move server_ip_addr, server_port, 128,
                                   iotask,
            // on_establish_cb -- called when listener is set up
            |kill_ch| {
                log(debug, fmt!("establish_cb %?",
                                kill_ch));
                core::comm::send(cont_ch, ());
            },
            // risky to run this on the loop, but some users
            // will want the POWER
            |new_conn, kill_ch| {
                log(debug, ~"SERVER: new connection!");
                do comm::listen |cont_ch| {
                    do task::spawn_sched(task::ManualThreads(1u)) {
                        log(debug, ~"SERVER: starting worker for new req");

                        let accept_result = accept(new_conn);
                        log(debug, ~"SERVER: after accept()");
                        if result::is_err(&accept_result) {
                            log(debug, ~"SERVER: error accept connection");
                            let err_data = result::get_err(&accept_result);
                            core::comm::send(kill_ch, Some(err_data));
                            log(debug,
                                ~"SERVER/WORKER: send on err cont ch");
                            cont_ch.send(());
                        }
                        else {
                            log(debug,
                                ~"SERVER/WORKER: send on cont ch");
                            cont_ch.send(());
                            let sock = result::unwrap(move accept_result);
                            log(debug, ~"SERVER: successfully accepted "+
                                ~"connection!");
                            let received_req_bytes = read(&sock, 0u);
                            match received_req_bytes {
                              result::Ok(data) => {
                                log(debug, ~"SERVER: got REQ str::from_bytes..");
                                log(debug, fmt!("SERVER: REQ data len: %?",
                                                vec::len(data)));
                                server_ch.send(
                                    str::from_bytes(data));
                                log(debug, ~"SERVER: before write");
                                tcp_write_single(&sock, str::to_bytes(resp));
                                log(debug, ~"SERVER: after write.. die");
                                core::comm::send(kill_ch, None);
                              }
                              result::Err(err_data) => {
                                log(debug, fmt!("SERVER: error recvd: %s %s",
                                                err_data.err_name,
                                                err_data.err_msg));
                                core::comm::send(kill_ch, Some(err_data));
                                server_ch.send(~"");
                              }
                            }
                            log(debug, ~"SERVER: worker spinning down");
                        }
                    }
                    log(debug, ~"SERVER: waiting to recv on cont_ch");
                    cont_ch.recv()
                };
                log(debug, ~"SERVER: recv'd on cont_ch..leaving listen cb");
            });
        // err check on listen_result
        if result::is_err(&listen_result) {
            match result::get_err(&listen_result) {
              GenericListenErr(name, msg) => {
                fail fmt!("SERVER: exited abnormally name %s msg %s",
                          name, msg);
              }
              AccessDenied => {
                fail ~"SERVER: exited abnormally, got access denied..";
              }
              AddressInUse => {
                fail ~"SERVER: exited abnormally, got address in use...";
              }
            }
        }
        let ret_val = server_ch.recv();
        log(debug, fmt!("SERVER: exited and got return val: '%s'", ret_val));
        ret_val
    }
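    // Attempt to set up a listener that is expected to fail at bind time;
    // returns the resulting TcpListenErrData. The connection callback
    // should never run.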
    fn run_tcp_test_server_fail(server_ip: &str, server_port: uint,
                                iotask: IoTask) -> TcpListenErrData {
        let server_ip_addr = ip::v4::parse_addr(server_ip);
        let listen_result = listen(move server_ip_addr, server_port, 128,
                                   iotask,
            // on_establish_cb -- called when listener is set up
            |kill_ch| {
                log(debug, fmt!("establish_cb %?",
                                kill_ch));
            },
            |new_conn, kill_ch| {
                fail fmt!("SERVER: shouldn't be called.. %? %?",
                          new_conn, kill_ch);
            });
        // err check on listen_result
        if result::is_err(&listen_result) {
            result::get_err(&listen_result)
        }
        else {
            fail ~"SERVER: did not fail as expected"
        }
    }
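    // Test client: connects to the given ip/port, writes `resp`, reads the
    // reply and returns it (bounced through `client_ch`), or returns the
    // connect error if the connection could not be established.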
    fn run_tcp_test_client(server_ip: &str, server_port: uint, resp: &str,
                           client_ch: comm::Chan<~str>,
                           iotask: IoTask)
        -> result::Result<~str, TcpConnectErrData> {
        let server_ip_addr = ip::v4::parse_addr(server_ip);

        log(debug, ~"CLIENT: starting..");
        let connect_result = connect(move server_ip_addr, server_port,
                                     iotask);
        if result::is_err(&connect_result) {
            log(debug, ~"CLIENT: failed to connect");
            let err_data = result::get_err(&connect_result);
            Err(err_data)
        }
        else {
            let sock = result::unwrap(move connect_result);
            let resp_bytes = str::to_bytes(resp);
            tcp_write_single(&sock, resp_bytes);
            let read_result = sock.read(0u);
            if read_result.is_err() {
                log(debug, ~"CLIENT: failure to read");
                Ok(~"")
            }
            else {
                client_ch.send(str::from_bytes(read_result.get()));
                let ret_val = client_ch.recv();
                log(debug, fmt!("CLIENT: after client_ch recv ret: '%s'",
                                ret_val));
                Ok(ret_val)
            }
        }
    }
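    // Write `val` on `sock` via write_future() and fail the test if the
    // write reports an error.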
    fn tcp_write_single(sock: &TcpSocket, val: ~[u8]) {
        let write_result_future = sock.write_future(val);
        let write_result = write_result_future.get();
        if result::is_err(&write_result) {
            log(debug, ~"tcp_write_single: write failed!");
            let err_data = result::get_err(&write_result);
            log(debug, fmt!("tcp_write_single err name: %s msg: %s",
                            err_data.err_name, err_data.err_msg));
            // meh. torn on what to do here.
            fail ~"tcp_write_single failed";
        }
    }
}