rust/src/libstd/rt/test.rs
Alex Crichton 201cab84e8 Move rust's uv implementation to its own crate
There are a few reasons that this is a desirable move to take:

1. Proof of concept that a third party event loop is possible
2. Clear separation of responsibility between rt::io and the uv-backend
3. Enforce in the future that the event loop is "pluggable" and replaceable

Here's a quick summary of the points of this pull request which make this
possible:

* Two new lang items were introduced: event_loop, and event_loop_factory.
  The idea of a "factory" is to define a function which can be called with no
  arguments and will return the new event loop as a trait object. This factory
  is emitted to the crate map when building an executable. The factory doesn't
  have to exist, and when it doesn't then an empty slot is in the crate map and
  a basic event loop with no I/O support is provided to the runtime.

* When building an executable, the rustuv crate will be linked by default
  (providing a default implementation of the event loop) via a similar method to
  injecting a dependency on libstd. This is currently the only location where
  the rustuv crate is ever linked.

* There is a new #[no_uv] attribute (implied by #[no_std]) which prevents the
  implicit linking to rustuv described above (see the sketch below).
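
  A minimal sketch of opting out (illustrative only; the attribute and crate
  names are those mentioned above, and the crate-level attribute syntax of this
  era is assumed):

      // crate that supplies its own event loop and links rustuv explicitly
      #[no_uv];
      extern mod rustuv;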

Closes #5019
2013-10-29 08:39:22 -07:00

// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use libc;
use option::{Some, None};
use os;
use path::GenericPath;
use path::Path;
use rand::Rng;
use rand;
use result::{Result, Ok, Err};
use rt::basic;
use rt::comm::oneshot;
use rt::new_event_loop;
use rt::sched::Scheduler;
use rt::sleeper_list::SleeperList;
use rt::task::Task;
use rt::task::UnwindResult;
use rt::thread::Thread;
use rt::work_queue::WorkQueue;
use unstable::{run_in_bare_thread};
use vec::{OwnedVector, MutableVector, ImmutableVector};

pub fn new_test_uv_sched() -> Scheduler {
    let queue = WorkQueue::new();
    let queues = ~[queue.clone()];
    let mut sched = Scheduler::new(new_event_loop(),
                                   queue,
                                   queues,
                                   SleeperList::new());
    // Don't wait for the Shutdown message
    sched.no_sleep = true;
    return sched;
}

pub fn new_test_sched() -> Scheduler {
    let queue = WorkQueue::new();
    let queues = ~[queue.clone()];
    let mut sched = Scheduler::new(basic::event_loop(),
                                   queue,
                                   queues,
                                   SleeperList::new());
    // Don't wait for the Shutdown message
    sched.no_sleep = true;
    return sched;
}

pub fn run_in_uv_task(f: ~fn()) {
    let f = Cell::new(f);
    do run_in_bare_thread {
        run_in_uv_task_core(f.take());
    }
}

pub fn run_in_newsched_task(f: ~fn()) {
    let f = Cell::new(f);
    do run_in_bare_thread {
        run_in_newsched_task_core(f.take());
    }
}

pub fn run_in_uv_task_core(f: ~fn()) {
    use rt::sched::Shutdown;

    let mut sched = ~new_test_uv_sched();
    let exit_handle = Cell::new(sched.make_handle());
    let on_exit: ~fn(UnwindResult) = |exit_status| {
        exit_handle.take().send(Shutdown);
        rtassert!(exit_status.is_success());
    };
    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
    task.death.on_exit = Some(on_exit);
    sched.bootstrap(task);
}

pub fn run_in_newsched_task_core(f: ~fn()) {
    use rt::sched::Shutdown;

    let mut sched = ~new_test_sched();
    let exit_handle = Cell::new(sched.make_handle());
    let on_exit: ~fn(UnwindResult) = |exit_status| {
        exit_handle.take().send(Shutdown);
        rtassert!(exit_status.is_success());
    };
    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
    task.death.on_exit = Some(on_exit);
    sched.bootstrap(task);
}

#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
    /*!
     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
     * for our multithreaded scheduler testing, depending on the number of cores available.
     *
     * This fixes issue #7772.
     */

    use libc;

    type rlim_t = libc::uint64_t;
    struct rlimit {
        rlim_cur: rlim_t,
        rlim_max: rlim_t
    }

    #[nolink]
    extern {
        // name probably doesn't need to be mut, but the C function doesn't specify const
        fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
                  oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
                  newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
        fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
        fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
    }

    static CTL_KERN: libc::c_int = 1;
    static KERN_MAXFILESPERPROC: libc::c_int = 29;
    static RLIMIT_NOFILE: libc::c_int = 8;

    pub unsafe fn raise_fd_limit() {
        #[fixed_stack_segment]; #[inline(never)];

        // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
        // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
        use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
        use mem::size_of_val;
        use os::last_os_error;

        // Fetch the kern.maxfilesperproc value
        let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
        let mut maxfiles: libc::c_int = 0;
        let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
        if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
                  to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
                  to_mut_unsafe_ptr(&mut size),
                  mut_null(), 0) != 0 {
            let err = last_os_error();
            error!("raise_fd_limit: error calling sysctl: {}", err);
            return;
        }

        // Fetch the current resource limits
        let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
        if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
            let err = last_os_error();
            error!("raise_fd_limit: error calling getrlimit: {}", err);
            return;
        }

        // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
        rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);

        // Set our newly-increased resource limit
        if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
            let err = last_os_error();
            error!("raise_fd_limit: error calling setrlimit: {}", err);
            return;
        }
    }
}

#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
    pub unsafe fn raise_fd_limit() {}
}

#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
    // Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
    unsafe { darwin_fd_limit::raise_fd_limit() }
}

/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
    use os;
    use from_str::FromStr;
    use rt::sched::Shutdown;
    use rt::util;

    // see comment in other function (raising fd limits)
    prepare_for_lots_of_tests();

    let f = Cell::new(f);

    do run_in_bare_thread {
        let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
            Some(nstr) => FromStr::from_str(nstr).unwrap(),
            None => {
                if util::limit_thread_creation_due_to_osx_and_valgrind() {
                    1
                } else {
                    // Using more threads than cores in test code
                    // to force the OS to preempt them frequently.
                    // Assuming that this helps stress test concurrent types.
                    util::num_cpus() * 2
                }
            }
        };
        let sleepers = SleeperList::new();

        let mut handles = ~[];
        let mut scheds = ~[];
        let mut work_queues = ~[];

        for _ in range(0u, nthreads) {
            let work_queue = WorkQueue::new();
            work_queues.push(work_queue);
        }

        for i in range(0u, nthreads) {
            let loop_ = new_event_loop();
            let mut sched = ~Scheduler::new(loop_,
                                            work_queues[i].clone(),
                                            work_queues.clone(),
                                            sleepers.clone());
            let handle = sched.make_handle();

            handles.push(handle);
            scheds.push(sched);
        }

        let handles = Cell::new(handles);
        let on_exit: ~fn(UnwindResult) = |exit_status| {
            let mut handles = handles.take();
            // Tell schedulers to exit
            for handle in handles.mut_iter() {
                handle.send(Shutdown);
            }
            rtassert!(exit_status.is_success());
        };
        let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
        main_task.death.on_exit = Some(on_exit);

        let mut threads = ~[];
        let main_task = Cell::new(main_task);

        let main_thread = {
            let sched = scheds.pop();
            let sched_cell = Cell::new(sched);
            do Thread::start {
                let sched = sched_cell.take();
                sched.bootstrap(main_task.take());
            }
        };
        threads.push(main_thread);

        while !scheds.is_empty() {
            let mut sched = scheds.pop();
            let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
                rtdebug!("bootstrapping non-primary scheduler");
            };
            let bootstrap_task_cell = Cell::new(bootstrap_task);
            let sched_cell = Cell::new(sched);
            let thread = do Thread::start {
                let sched = sched_cell.take();
                sched.bootstrap(bootstrap_task_cell.take());
            };
            threads.push(thread);
        }

        // Wait for schedulers
        for thread in threads.move_iter() {
            thread.join();
        }
    }
}
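
// A hedged usage sketch (illustrative, not part of the original file): driving
// a test body through `run_in_mt_newsched_task` in the same `do` style used
// above. The body runs in a task on one of the spawned schedulers, and the
// schedulers shut down once it returns.
//
//     do run_in_mt_newsched_task {
//         // exercise some concurrent type from several tasks here
//         assert!(1 + 1 == 2);
//     }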

/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
    Scheduler::run_task(Task::build_child(None, f));
}

/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
    Scheduler::run_task_later(Task::build_child(None, f));
}

pub fn spawntask_random(f: ~fn()) {
    use rand::{Rand, rng};

    let mut rng = rng();
    let run_now: bool = Rand::rand(&mut rng);

    if run_now {
        spawntask(f)
    } else {
        spawntask_later(f)
    }
}

pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
    let (port, chan) = oneshot();
    let chan = Cell::new(chan);
    let on_exit: ~fn(UnwindResult) = |exit_status| chan.take().send(exit_status);

    let mut new_task = Task::build_root(None, f);
    new_task.death.on_exit = Some(on_exit);

    Scheduler::run_task(new_task);

    let exit_status = port.recv();
    if exit_status.is_success() { Ok(()) } else { Err(()) }
}
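
// A hedged usage sketch (illustrative): `spawntask_try` reports task failure as
// an `Err` instead of aborting, so a test can assert that a body fails. The
// `fail!` call below is only an example of a deliberately failing body.
//
//     let result = do spawntask_try {
//         fail!("deliberate failure");
//     };
//     assert!(result.is_err());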

/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {
    let f = Cell::new(f);
    let thread = do Thread::start {
        run_in_newsched_task_core(f.take());
    };
    return thread;
}

/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
    do run_in_bare_thread {
        let mut sched = ~new_test_sched();
        let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
        cleanup_task(task);
    }
}
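
// A hedged usage sketch (illustrative): building a task purely for inspection.
// The closure receives the freshly built `~Task`, may examine it, and must hand
// it back so `cleanup_task` can mark it destroyed.
//
//     do with_test_task |task| {
//         // poke at `task` here without ever scheduling it
//         task
//     }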

/// Use to clean up tasks created for testing but not actually "run".
pub fn cleanup_task(mut task: ~Task) {
    task.destroyed = true;
}

/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
    unsafe {
        return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
    }
    extern {
        fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
    }
}

/// Get a temporary path which could be the location of a unix socket
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_unix() -> Path {
    os::tmpdir().join(rand::task_rng().gen_ascii_str(20))
}

/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
    SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}

/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
    SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}

/*
XXX: Welcome to MegaHack City.

The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
    use os;
    use str::StrSlice;
    use vec::ImmutableVector;

    let base = 9600u;
    let range = 1000;

    let bases = [
        ("32-opt", base + range * 1),
        ("32-noopt", base + range * 2),
        ("64-opt", base + range * 3),
        ("64-noopt", base + range * 4),
        ("64-opt-vg", base + range * 5),
        ("all-opt", base + range * 6),
        ("snap3", base + range * 7),
        ("dist", base + range * 8)
    ];

    // FIXME (#9639): This needs to handle non-utf8 paths
    let path = os::getcwd();
    let path_s = path.as_str().unwrap();

    let mut final_base = base;

    for &(dir, base) in bases.iter() {
        if path_s.contains(dir) {
            final_base = base;
            break;
        }
    }

    return final_base;
}

/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
    use os::getenv;
    use from_str::from_str;

    match getenv("RUST_RT_STRESS") {
        Some(val) => from_str::<uint>(val).unwrap(),
        None => 1
    }
}
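
// A hedged usage sketch (illustrative): repeating a stress-test body according
// to the RUST_RT_STRESS environment variable read by `stress_factor`.
//
//     for _ in range(0u, stress_factor()) {
//         // stress-test body; runs once unless RUST_RT_STRESS is set higher
//     }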