mod classes;
mod comments;
mod numbers;
mod ptr;
mod strings;

use crate::{
    SyntaxKind::{self, *},
    TextUnit, T,
};

use self::{
    classes::*,
    comments::{scan_comment, scan_shebang},
    numbers::scan_number,
    ptr::Ptr,
    strings::{
        is_string_literal_start, scan_byte_char_or_string, scan_char, scan_raw_string, scan_string,
    },
};

/// A token of Rust source.
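///
/// A token records only a kind and a length; it stores neither the token text
/// nor its offset in the source. For example, lexing the keyword `let` on its
/// own should produce a single token whose `kind` is the `let` keyword and
/// whose `len` is 3.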
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
    /// The kind of token.
    pub kind: SyntaxKind,
    /// The length of the token.
    pub len: TextUnit,
}

/// Break a string up into its component tokens
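///
/// The tokens are contiguous: their lengths sum to the length of the input.
/// For example, `tokenize("fn foo")` should yield three tokens: the `fn`
/// keyword, one whitespace token, and one identifier.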
pub fn tokenize(text: &str) -> Vec<Token> {
    let mut text = text;
    let mut acc = Vec::new();
    while !text.is_empty() {
        let token = next_token(text);
        acc.push(token);
        let len: u32 = token.len.into();
        text = &text[len as usize..];
    }
    acc
}

/// Get the next token from a string
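///
/// Note that compound punctuation is not glued together at this stage: per the
/// comment in `next_token_inner`, `next_token("..")` should return a single
/// one-byte `T![.]` token, leaving the second `.` for the next call.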
pub fn next_token(text: &str) -> Token {
    assert!(!text.is_empty());
    let mut ptr = Ptr::new(text);
    let c = ptr.bump().unwrap();
    let kind = next_token_inner(c, &mut ptr);
    let len = ptr.into_len();
    Token { kind, len }
}

fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
    if is_whitespace(c) {
        ptr.bump_while(is_whitespace);
        return WHITESPACE;
    }

    match c {
        '#' => {
            if scan_shebang(ptr) {
                return SHEBANG;
            }
        }
        '/' => {
            if let Some(kind) = scan_comment(ptr) {
                return kind;
            }
        }
        _ => (),
    }

    let ident_start = is_ident_start(c) && !is_string_literal_start(c, ptr.current(), ptr.nth(1));
    if ident_start {
        return scan_ident(c, ptr);
    }

    if is_dec_digit(c) {
        let kind = scan_number(c, ptr);
        scan_literal_suffix(ptr);
        return kind;
    }

    // One-byte tokens.
    if let Some(kind) = SyntaxKind::from_char(c) {
        return kind;
    }

    match c {
        // Possibly multi-byte tokens,
        // but we only produce single-byte tokens for now:
        // T![...], T![..], T![..=], T![.]
        '.' => return T![.],
        // T![::] T![:]
        ':' => return T![:],
        // T![==] T![=>] T![=]
        '=' => return T![=],
        // T![!=] T![!]
        '!' => return T![!],
        // T![->] T![-]
        '-' => return T![-],

        // If the character is an ident start not followed by another single
        // quote, then this is a lifetime name:
        '\'' => {
            return if ptr.at_p(is_ident_start) && !ptr.at_str("''") {
                ptr.bump();
                while ptr.at_p(is_ident_continue) {
                    ptr.bump();
                }
                // lifetimes shouldn't end with a single quote
                // if we find one, then this is an invalid character literal
                if ptr.at('\'') {
                    ptr.bump();
                    return CHAR;
                }
                LIFETIME
            } else {
                scan_char(ptr);
                scan_literal_suffix(ptr);
                CHAR
            };
        }
        'b' => {
            let kind = scan_byte_char_or_string(ptr);
            scan_literal_suffix(ptr);
            return kind;
        }
        '"' => {
            scan_string(ptr);
            scan_literal_suffix(ptr);
            return STRING;
        }
        'r' => {
            scan_raw_string(ptr);
            scan_literal_suffix(ptr);
            return RAW_STRING;
        }
        _ => (),
    }
    ERROR
}
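
// `scan_ident` handles keywords and raw identifiers as well as plain names:
// `type` should map to its keyword kind via `SyntaxKind::from_keyword`, while
// the raw identifier `r#type` takes the `is_raw` branch, skips the keyword
// lookup, and comes out as a plain IDENT.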
fn scan_ident(c: char, ptr: &mut Ptr) -> SyntaxKind {
    let is_raw = match (c, ptr.current()) {
        ('r', Some('#')) => {
            ptr.bump();
            true
        }
        ('_', None) => return T![_],
        ('_', Some(c)) if !is_ident_continue(c) => return T![_],
        _ => false,
    };
    ptr.bump_while(is_ident_continue);
    if !is_raw {
        if let Some(kind) = SyntaxKind::from_keyword(ptr.current_token_text()) {
            return kind;
        }
    }
    IDENT
}
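
// A literal suffix is lexed as part of the literal token itself: in `92u32` or
// `"hi"name`, the trailing `u32` / `name` should be consumed here so that the
// whole literal, suffix included, ends up in a single token.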
fn scan_literal_suffix(ptr: &mut Ptr) {
    if ptr.at_p(is_ident_start) {
        ptr.bump();
    }
    ptr.bump_while(is_ident_continue);
}
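
// `classify_literal` accepts an input only if it lexes to a single literal
// token covering the entire string: `classify_literal("92")` should be
// `Some(..)`, while `classify_literal("92 92")` and `classify_literal("foo")`
// should both be `None`.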
pub fn classify_literal(text: &str) -> Option<Token> {
    let tkn = next_token(text);
    if !tkn.kind.is_literal() || tkn.len.to_usize() != text.len() {
        return None;
    }

    Some(tkn)
}
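
// A minimal test sketch for the public entry points above; it assumes only the
// APIs defined in this file and the `From<TextUnit> for u32` conversion already
// used in `tokenize`.
#[cfg(test)]
mod sketch_tests {
    use super::*;

    #[test]
    fn tokenize_covers_the_whole_input() {
        // Keyword, whitespace, identifier, block comment, and number tokens
        // should cover every byte of the input.
        let text = "fn foo /* comment */ 92";
        let total: u32 = tokenize(text).iter().map(|t| u32::from(t.len)).sum();
        assert_eq!(total as usize, text.len());
    }

    #[test]
    fn classify_literal_requires_a_single_literal() {
        assert!(classify_literal("92").is_some());
        // More than one token, so this should be rejected.
        assert!(classify_literal("92 92").is_none());
    }
}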