// -*- c++ -*-
#ifndef RUST_KERNEL_H
#define RUST_KERNEL_H
/**
 * A handle object for Rust tasks. We need a reference to the message queue
 * of the referent's domain which we can safely hang on to since it's a
 * kernel object. We use the referent reference as a label we stash in
 * messages sent via this proxy.
 */

// Forward declarations; both are defined later in this header or elsewhere
// in the runtime.
class rust_kernel;
class rust_message;

/**
 * Kernel-owned, reference-counted proxy wrapping a pointer to a runtime
 * object (e.g. a task, port, or scheduler) together with the message queue
 * of the referent's domain.  Inherits rust_cond so the handle itself can be
 * used as a condition/label, rc_base for reference counting, and
 * kernel_owned so it is allocated from the kernel's memory region.
 */
template <typename T> class
rust_handle :
    public rust_cond,
    public rc_base<rust_handle<T> >,
    public kernel_owned<rust_handle<T> > {
public:
    // Kernel that owns this handle (required by kernel_owned allocation).
    rust_kernel *kernel;
    // Message queue of the referent's domain.  Safe to retain beyond the
    // domain's lifetime because queues are kernel objects (see class
    // comment above).
    rust_message_queue *message_queue;
    // Raw pointer to the referent, used as a label stashed in messages
    // sent via this proxy.  NOTE(review): nothing here shows it is safe to
    // dereference after the referent dies — confirm with callers.
    T *_referent;
    // Accessor for the wrapped referent pointer.
    T * referent() {
        return _referent;
    }
    // Constructs a handle; simply captures the three pointers, no other
    // initialization is performed.
    rust_handle(rust_kernel *kernel,
                rust_message_queue *message_queue,
                T *referent) :
        kernel(kernel),
        message_queue(message_queue),
        _referent(referent) {
        // Nop.
    }
};
// Forward declaration; defined at the bottom of this header.
class rust_task_thread;

/**
 * A global object shared by all thread domains. Most of the data structures
 * in this class are synchronized since they are accessed from multiple
 * threads.
 */
class rust_kernel : public rust_thread {
    // Memory region backing kernel-owned allocations (malloc/free below).
    memory_region *_region;
    rust_log _log;
    // Service object supplying low-level facilities to the kernel.
    rust_srv *_srv;

    /**
     * Task proxy objects are kernel owned handles to Rust objects.
     * Each map caches the handle created for a given runtime object so it
     * is only created once (see the get_*_handle accessors below).
     */
    hash_map<rust_task *, rust_handle<rust_task> *> _task_handles;
    hash_map<rust_port *, rust_handle<rust_port> *> _port_handles;
    hash_map<rust_scheduler *, rust_handle<rust_scheduler> *> _sched_handles;

    // Frees every handle in the given map; used during teardown.
    template<class T> void free_handles(hash_map<T*, rust_handle<T>* > &map);

    // rust_thread entry point: runs the kernel message-pump loop.
    void run();
    void start_kernel_loop();
    // Set to request that the kernel loop exit (see
    // terminate_kernel_loop); read by the loop itself.
    bool _interrupt_kernel_loop;

    // Protects kernel-loop state; also signaled to wake the pump thread
    // (see signal_kernel_lock / notify_message_enqueued).
    lock_and_signal _kernel_lock;

    void terminate_kernel_loop();
    void pump_message_queues();

    // Unsynchronized variant used internally when _kernel_lock is
    // already held; presumably the locking wrapper is get_sched_handle —
    // TODO confirm in the implementation file.
    rust_handle<rust_scheduler> *
    internal_get_sched_handle(rust_scheduler *sched);

    // Worker threads started by start_task_threads.
    array_list<rust_task_thread *> threads;

    rust_scheduler *create_scheduler(const char *name);
    void destroy_scheduler();

public:
    // The single scheduler instance (see get_scheduler below).
    rust_scheduler *sched;
    lock_and_signal scheduler_lock;

    /**
     * Message queues are kernel objects and are associated with domains.
     * Their lifetime is not bound to the lifetime of a domain and in fact
     * live on after their associated domain has died. This way we can safely
     * communicate with domains that may have died.
     */
    indexed_list<rust_message_queue> message_queues;

    // Return the (cached) kernel handle for the given runtime object,
    // creating it on first use.
    rust_handle<rust_scheduler> *get_sched_handle(rust_scheduler *sched);
    rust_handle<rust_task> *get_task_handle(rust_task *task);
    rust_handle<rust_port> *get_port_handle(rust_port *port);

    rust_kernel(rust_srv *srv);

    bool is_deadlocked();

    // Wake up a thread blocked on _kernel_lock.
    void signal_kernel_lock();

    /**
     * Notifies the kernel whenever a message has been enqueued. This gives
     * the kernel the opportunity to wake up the message pump thread if the
     * message queue is not associated.
     */
    void
    notify_message_enqueued(rust_message_queue *queue, rust_message *message);

    void log_all_scheduler_state();
    // printf-style logging at the given level.
    void log(uint32_t level, char const *fmt, ...);
    virtual ~rust_kernel();

    // Allocate/release memory from the kernel's region (_region).
    void *malloc(size_t size);
    void free(void *mem);

    // FIXME: this should go away
    inline rust_scheduler *get_scheduler() const { return sched; }

    // Spawn num_threads task threads; returns a status code — see the
    // implementation for its meaning.
    int start_task_threads(int num_threads);

#ifdef __WIN32__
    // Abort with diagnostics if a Win32 API call (named fn) failed.
    void win32_require(LPCTSTR fn, BOOL ok);
#endif
};
2011-06-24 18:50:06 -05:00
|
|
|
/**
 * A worker thread owned by the kernel (started via
 * rust_kernel::start_task_threads and stored in rust_kernel::threads).
 */
class rust_task_thread : public rust_thread {
    // Index of this thread within the kernel's thread list.
    int id;
    // Owning kernel; outlives the thread.
    rust_kernel *owner;

public:
    rust_task_thread(int id, rust_kernel *owner);

    // rust_thread entry point.
    virtual void run();
};
#endif /* RUST_KERNEL_H */