mod ptr;
mod comments;
mod strings;
mod numbers;
mod classes;

use {
    TextUnit,
    SyntaxKind::{self, *},
};

use self::{
    ptr::Ptr,
    classes::*,
    numbers::scan_number,
    strings::{
        is_string_literal_start, scan_byte_char_or_string, scan_char,
        scan_raw_string, scan_string,
    },
    comments::{scan_comment, scan_shebang},
};

/// A token of Rust source.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
    /// The kind of token.
    pub kind: SyntaxKind,
    /// The length of the token.
    pub len: TextUnit,
}

/// Break a string up into its component tokens
pub fn tokenize(text: &str) -> Vec<Token> {
    let mut text = text;
    let mut acc = Vec::new();
    while !text.is_empty() {
        let token = next_token(text);
        acc.push(token);
        let len: u32 = token.len.into();
        text = &text[len as usize..];
    }
    acc
}
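
// A minimal sanity sketch (the test name and input string are illustrative
// assumptions): `tokenize` advances by each token's length until the input is
// empty, so the token lengths always sum to the length of the original text.
#[test]
fn tokenize_covers_the_whole_input() {
    let text = "let x = 92;";
    let mut consumed = 0usize;
    for token in tokenize(text) {
        let len: u32 = token.len.into();
        consumed += len as usize;
    }
    assert_eq!(consumed, text.len());
}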

/// Get the next token from a string
pub fn next_token(text: &str) -> Token {
    assert!(!text.is_empty());
    let mut ptr = Ptr::new(text);
    let c = ptr.bump().unwrap();
    let kind = next_token_inner(c, &mut ptr);
    let len = ptr.into_len();
    Token { kind, len }
}
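
// Another illustrative sketch: `next_token` scans exactly one token from the
// front of the input and reports how many bytes it consumed.
#[test]
fn next_token_scans_a_single_token() {
    let token = next_token("hello world");
    assert_eq!(token.kind, IDENT);
    let len: u32 = token.len.into();
    assert_eq!(len, 5); // only `hello` was consumed
}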
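
/// Scan a single token: `c` is the token's first character and has already
/// been consumed by the caller, so `ptr` points just past it.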
fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
    if is_whitespace(c) {
        ptr.bump_while(is_whitespace);
        return WHITESPACE;
    }

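    // `#` and `/` need lookahead: they may start a shebang or a comment rather
    // than plain punctuation.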
    match c {
        '#' => if scan_shebang(ptr) {
            return SHEBANG;
        },
        '/' => if let Some(kind) = scan_comment(ptr) {
            return kind;
        },
        _ => (),
    }

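    // An identifier-start character begins an identifier or keyword, unless it
    // actually starts a string-like literal (for example the `b` in `b"..."`
    // or the `r` in `r"..."`), which is handled further down.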
    let ident_start = is_ident_start(c) && !is_string_literal_start(c, ptr.next(), ptr.nnext());
    if ident_start {
        return scan_ident(c, ptr);
    }

    if is_dec_digit(c) {
        let kind = scan_number(c, ptr);
        scan_literal_suffix(ptr);
        return kind;
    }

    // One-byte tokens.
    match c {
        ';' => return SEMI,
        ',' => return COMMA,
        '(' => return L_PAREN,
        ')' => return R_PAREN,
        '{' => return L_CURLY,
        '}' => return R_CURLY,
        '[' => return L_BRACK,
        ']' => return R_BRACK,
        '<' => return L_ANGLE,
        '>' => return R_ANGLE,
        '@' => return AT,
        '#' => return POUND,
        '~' => return TILDE,
        '?' => return QUESTION,
        '$' => return DOLLAR,
        '&' => return AMPERSAND,
        '|' => return PIPE,
        '+' => return PLUS,
        '*' => return STAR,
        '/' => return SLASH,
        '^' => return CARET,
        '%' => return PERCENT,

        // Multi-byte tokens.
        '.' => {
            return match (ptr.next(), ptr.nnext()) {
                (Some('.'), Some('.')) => {
                    ptr.bump();
                    ptr.bump();
                    DOTDOTDOT
                }
                (Some('.'), Some('=')) => {
                    ptr.bump();
                    ptr.bump();
                    DOTDOTEQ
                }
                (Some('.'), _) => {
                    ptr.bump();
                    DOTDOT
                }
                _ => DOT,
            };
        }
        ':' => {
            return match ptr.next() {
                Some(':') => {
                    ptr.bump();
                    COLONCOLON
                }
                _ => COLON,
            };
        }
        '=' => {
            return match ptr.next() {
                Some('=') => {
                    ptr.bump();
                    EQEQ
                }
                Some('>') => {
                    ptr.bump();
                    FAT_ARROW
                }
                _ => EQ,
            };
        }
        '!' => {
            return match ptr.next() {
                Some('=') => {
                    ptr.bump();
                    NEQ
                }
                _ => EXCL,
            };
        }
        '-' => {
            return if ptr.next_is('>') {
                ptr.bump();
                THIN_ARROW
            } else {
                MINUS
            };
        }

        // If the character is an ident start not followed by another single
        // quote, then this is a lifetime name:
        '\'' => {
            return if ptr.next_is_p(is_ident_start) && !ptr.nnext_is('\'') {
                ptr.bump();
                while ptr.next_is_p(is_ident_continue) {
                    ptr.bump();
                }
                // lifetimes shouldn't end with a single quote
                // if we find one, then this is an invalid character literal
                if ptr.next_is('\'') {
                    ptr.bump();
                    return CHAR; // TODO: error reporting
                }
                LIFETIME
            } else {
                scan_char(ptr);
                scan_literal_suffix(ptr);
                CHAR
            };
        }
        'b' => {
            let kind = scan_byte_char_or_string(ptr);
            scan_literal_suffix(ptr);
            return kind;
        }
        '"' => {
            scan_string(ptr);
            scan_literal_suffix(ptr);
            return STRING;
        }
        'r' => {
            scan_raw_string(ptr);
            scan_literal_suffix(ptr);
            return RAW_STRING;
        }
        _ => (),
    }
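    // None of the rules above matched: emit an error token.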
    ERROR
}
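
/// Scan an identifier or keyword; `c` is its first character and has already
/// been consumed by the caller.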
fn scan_ident(c: char, ptr: &mut Ptr) -> SyntaxKind {
    let is_single_letter = match ptr.next() {
        None => true,
        Some(c) if !is_ident_continue(c) => true,
        _ => false,
    };
    if is_single_letter {
        return if c == '_' { UNDERSCORE } else { IDENT };
    }
    ptr.bump_while(is_ident_continue);
    if let Some(kind) = SyntaxKind::from_keyword(ptr.current_token_text()) {
        return kind;
    }
    IDENT
}
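
/// Consume an optional literal suffix (such as the `u8` in `1u8`), which is
/// lexed like a trailing identifier.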
fn scan_literal_suffix(ptr: &mut Ptr) {
    if ptr.next_is_p(is_ident_start) {
        ptr.bump();
    }
    ptr.bump_while(is_ident_continue);
}