// -*- c++ -*-
#ifndef RUST_KERNEL_H
#define RUST_KERNEL_H

#include "rust_globals.h"

#include <map>
#include <vector>

#include "memory_region.h"
#include "rust_log.h"
#include "rust_sched_reaper.h"
#include "util/hash_map.h"

struct rust_task_thread;
class rust_scheduler;
class rust_port;

typedef intptr_t rust_sched_id;
typedef intptr_t rust_task_id;
typedef intptr_t rust_port_id;

typedef std::map<rust_sched_id, rust_scheduler*> sched_map;

class rust_sched_driver;
class rust_sched_launcher_factory;

/**
|
|
|
|
* A global object shared by all thread domains. Most of the data structures
|
|
|
|
* in this class are synchronized since they are accessed from multiple
|
|
|
|
* threads.
|
|
|
|
*/
|
2011-07-29 11:00:44 -07:00
|
|
|
class rust_kernel {
|
2011-07-18 12:02:26 -07:00
|
|
|
memory_region _region;
|
2010-08-27 18:26:36 -07:00
|
|
|
rust_log _log;
|
2011-07-23 19:03:02 -07:00
|
|
|
|
2012-02-04 00:03:45 -08:00
|
|
|
// The next task id
|
2012-02-03 17:26:54 -08:00
|
|
|
rust_task_id max_task_id;
|
|
|
|
|
2012-03-14 17:24:19 -07:00
|
|
|
// Protects max_port_id and port_table
|
|
|
|
lock_and_signal port_lock;
|
|
|
|
// The next port id
|
|
|
|
rust_task_id max_port_id;
|
|
|
|
hash_map<rust_port_id, rust_port *> port_table;
|
|
|
|
|
2012-02-03 17:26:54 -08:00
|
|
|
lock_and_signal rval_lock;
|
2012-01-12 22:17:21 -08:00
|
|
|
int rval;
|
|
|
|
|
2012-02-27 13:36:54 -08:00
|
|
|
// Protects max_sched_id and sched_table, join_list
|
2012-02-04 14:54:10 -08:00
|
|
|
lock_and_signal sched_lock;
|
2012-02-27 13:36:54 -08:00
|
|
|
// The next scheduler id
|
2012-02-07 16:11:57 -08:00
|
|
|
rust_sched_id max_sched_id;
|
2012-02-27 13:36:54 -08:00
|
|
|
// A map from scheduler ids to schedulers. When this is empty
|
|
|
|
// the kernel terminates
|
2012-02-07 16:11:57 -08:00
|
|
|
sched_map sched_table;
|
2012-02-27 13:36:54 -08:00
|
|
|
// A list of scheduler ids that are ready to exit
|
|
|
|
std::vector<rust_sched_id> join_list;
|
2012-02-04 14:54:10 -08:00
|
|
|
|
2012-03-30 13:54:37 -07:00
|
|
|
rust_sched_reaper sched_reaper;
|
2012-04-01 18:42:28 -07:00
|
|
|
// The single-threaded scheduler that uses the main thread
|
|
|
|
rust_sched_id osmain_scheduler;
|
|
|
|
// Runs the single-threaded scheduler that executes tasks
|
|
|
|
// on the main thread
|
|
|
|
rust_sched_driver *osmain_driver;
|
2012-03-30 13:54:37 -07:00
|
|
|
|
2012-04-06 17:03:00 -07:00
|
|
|
// An atomically updated count of the live, 'non-weak' tasks
|
|
|
|
uintptr_t non_weak_tasks;
|
|
|
|
// Protects weak_task_chans
|
|
|
|
lock_and_signal weak_task_lock;
|
|
|
|
// A list of weak tasks that need to be told when to exit
|
|
|
|
std::vector<rust_port_id> weak_task_chans;
|
|
|
|
|
2012-04-03 18:54:57 -07:00
|
|
|
rust_scheduler* get_scheduler_by_id_nolock(rust_sched_id id);
|
2012-04-06 17:03:00 -07:00
|
|
|
void end_weak_tasks();
|
2012-03-30 13:54:37 -07:00
|
|
|
|
2012-04-11 22:14:16 -07:00
|
|
|
// Used to communicate with the process-side, global libuv loop
|
|
|
|
uintptr_t global_loop_chan;
|
|
|
|
void** global_async_handle;
|
|
|
|
|
2011-08-08 13:38:20 -07:00
|
|
|
public:
|
2011-07-27 14:34:39 -07:00
|
|
|
struct rust_env *env;
|
|
|
|
|
2012-04-01 22:18:40 -05:00
|
|
|
rust_kernel(rust_env *env);
|
2010-09-15 11:56:45 -07:00
|
|
|
|
2011-04-19 12:21:57 +02:00
|
|
|
void log(uint32_t level, char const *fmt, ...);
|
2011-07-06 15:06:30 -07:00
|
|
|
void fatal(char const *fmt, ...);
|
2010-09-07 18:39:07 -07:00
|
|
|
|
2011-07-18 12:02:26 -07:00
|
|
|
void *malloc(size_t size, const char *tag);
|
2011-07-05 22:44:22 -07:00
|
|
|
void *realloc(void *mem, size_t size);
|
2010-09-07 18:39:07 -07:00
|
|
|
void free(void *mem);
|
2012-02-27 15:42:22 -08:00
|
|
|
memory_region *region() { return &_region; }
|
2011-06-24 15:56:12 -07:00
|
|
|
|
2011-08-10 12:57:53 -07:00
|
|
|
void fail();
|
|
|
|
|
2012-02-06 21:06:12 -08:00
|
|
|
rust_sched_id create_scheduler(size_t num_threads);
|
2012-04-01 18:42:28 -07:00
|
|
|
rust_sched_id create_scheduler(rust_sched_launcher_factory *launchfac,
|
|
|
|
size_t num_threads, bool allow_exit);
|
2012-02-06 21:06:12 -08:00
|
|
|
rust_scheduler* get_scheduler_by_id(rust_sched_id id);
|
2012-02-04 14:54:10 -08:00
|
|
|
// Called by a scheduler to indicate that it is terminating
|
2012-02-06 21:06:12 -08:00
|
|
|
void release_scheduler_id(rust_sched_id id);
|
2012-03-30 13:54:37 -07:00
|
|
|
void wait_for_schedulers();
|
2012-04-01 18:42:28 -07:00
|
|
|
int run();
|
2011-06-28 11:34:20 -07:00
|
|
|
|
|
|
|
#ifdef __WIN32__
|
|
|
|
void win32_require(LPCTSTR fn, BOOL ok);
|
|
|
|
#endif
|
2011-07-23 14:01:43 -07:00
|
|
|
|
2012-03-14 20:55:57 -07:00
|
|
|
rust_task_id generate_task_id();
|
2012-02-03 15:45:59 -08:00
|
|
|
|
2012-03-14 17:24:19 -07:00
|
|
|
rust_port_id register_port(rust_port *port);
|
|
|
|
rust_port *get_port_by_id(rust_port_id id);
|
|
|
|
void release_port_id(rust_port_id tid);
|
|
|
|
|
2012-01-12 22:17:21 -08:00
|
|
|
void set_exit_status(int code);
|
2012-04-03 14:03:27 -07:00
|
|
|
|
|
|
|
rust_sched_id osmain_sched_id() { return osmain_scheduler; }
|
2012-04-06 17:03:00 -07:00
|
|
|
|
|
|
|
void register_task();
|
|
|
|
void unregister_task();
|
|
|
|
void weaken_task(rust_port_id chan);
|
|
|
|
void unweaken_task(rust_port_id chan);
|
|
|
|
|
|
|
|
bool send_to_port(rust_port_id chan, void *sptr);
|
2012-04-11 22:14:16 -07:00
|
|
|
|
|
|
|
uintptr_t* get_global_loop() { return &global_loop_chan; }
|
|
|
|
void** get_global_async_handle() { return global_async_handle; }
|
|
|
|
void set_global_async_handle(void* handle) {
|
|
|
|
*global_async_handle = handle; }
|
2010-08-27 18:26:36 -07:00
|
|
|
};
template <typename T> struct kernel_owned {
|
|
|
|
inline void *operator new(size_t size, rust_kernel *kernel,
|
2012-04-03 04:37:00 -05:00
|
|
|
const char *tag) {
|
|
|
|
return kernel->malloc(size, tag);
|
|
|
|
}
|
2012-04-02 22:18:01 -05:00
|
|
|
|
|
|
|
void operator delete(void *ptr) {
|
|
|
|
((T *)ptr)->kernel->free(ptr);
|
|
|
|
}
|
|
|
|
};
#endif /* RUST_KERNEL_H */