279 lines
9.9 KiB
Rust
Raw Normal View History

2018-07-30 14:08:06 +03:00
mod classes;
2018-07-29 15:16:07 +03:00
mod comments;
mod numbers;
2018-07-30 14:08:06 +03:00
mod ptr;
mod strings;
2017-12-30 00:48:47 +03:00
2018-10-15 19:55:32 +03:00
use crate::{
2018-07-29 15:16:07 +03:00
SyntaxKind::{self, *},
TextUnit, T,
2018-07-29 15:16:07 +03:00
};
2017-12-30 15:56:52 +03:00
2018-07-29 15:16:07 +03:00
use self::{
classes::*,
2018-07-30 14:08:06 +03:00
comments::{scan_comment, scan_shebang},
2018-07-29 15:16:07 +03:00
numbers::scan_number,
2018-07-30 14:08:06 +03:00
ptr::Ptr,
2018-07-29 15:16:07 +03:00
strings::{
2018-07-30 14:08:06 +03:00
is_string_literal_start, scan_byte_char_or_string, scan_char, scan_raw_string, scan_string,
},
2018-07-29 15:16:07 +03:00
};
2018-07-29 15:16:07 +03:00
/// A token of Rust source.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
    /// The kind of token.
    pub kind: SyntaxKind,
    /// The length of the token in the source text.
    pub len: TextUnit,
}
2017-12-31 16:42:22 +03:00
/// Break a string up into its component tokens
2017-12-31 17:54:33 +03:00
pub fn tokenize(text: &str) -> Vec<Token> {
2019-07-22 17:47:33 +03:00
if text.is_empty() {
return vec![];
}
2017-12-31 17:54:33 +03:00
let mut text = text;
let mut acc = Vec::new();
2019-07-22 17:47:33 +03:00
if let Some(len) = ra_rustc_lexer::strip_shebang(text) {
acc.push(Token { kind: SHEBANG, len: TextUnit::from_usize(len) });
text = &text[len..];
}
2017-12-31 17:54:33 +03:00
while !text.is_empty() {
2019-07-22 17:47:33 +03:00
let rustc_token = ra_rustc_lexer::first_token(text);
macro_rules! decompose {
($t1:expr, $t2:expr) => {{
acc.push(Token { kind: $t1, len: 1.into() });
acc.push(Token { kind: $t2, len: 1.into() });
text = &text[2..];
continue;
}};
($t1:expr, $t2:expr, $t3:expr) => {{
acc.push(Token { kind: $t1, len: 1.into() });
acc.push(Token { kind: $t2, len: 1.into() });
acc.push(Token { kind: $t3, len: 1.into() });
text = &text[3..];
continue;
}};
}
let kind = match rustc_token.kind {
ra_rustc_lexer::TokenKind::LineComment => COMMENT,
ra_rustc_lexer::TokenKind::BlockComment { .. } => COMMENT,
ra_rustc_lexer::TokenKind::Whitespace => WHITESPACE,
ra_rustc_lexer::TokenKind::Ident => {
let token_text = &text[..rustc_token.len];
if token_text == "_" {
UNDERSCORE
} else {
SyntaxKind::from_keyword(&text[..rustc_token.len]).unwrap_or(IDENT)
}
}
ra_rustc_lexer::TokenKind::RawIdent => IDENT,
ra_rustc_lexer::TokenKind::Literal { kind, .. } => match kind {
ra_rustc_lexer::LiteralKind::Int { .. } => INT_NUMBER,
ra_rustc_lexer::LiteralKind::Float { .. } => FLOAT_NUMBER,
ra_rustc_lexer::LiteralKind::Char { .. } => CHAR,
ra_rustc_lexer::LiteralKind::Byte { .. } => BYTE,
ra_rustc_lexer::LiteralKind::Str { .. } => STRING,
ra_rustc_lexer::LiteralKind::ByteStr { .. } => BYTE_STRING,
ra_rustc_lexer::LiteralKind::RawStr { .. } => RAW_STRING,
ra_rustc_lexer::LiteralKind::RawByteStr { .. } => RAW_BYTE_STRING,
},
ra_rustc_lexer::TokenKind::Lifetime { .. } => LIFETIME,
ra_rustc_lexer::TokenKind::Semi => SEMI,
ra_rustc_lexer::TokenKind::Comma => COMMA,
ra_rustc_lexer::TokenKind::DotDotDot => decompose!(DOT, DOT, DOT),
ra_rustc_lexer::TokenKind::DotDotEq => decompose!(DOT, DOT, EQ),
ra_rustc_lexer::TokenKind::DotDot => decompose!(DOT, DOT),
ra_rustc_lexer::TokenKind::Dot => DOT,
ra_rustc_lexer::TokenKind::OpenParen => L_PAREN,
ra_rustc_lexer::TokenKind::CloseParen => R_PAREN,
ra_rustc_lexer::TokenKind::OpenBrace => L_CURLY,
ra_rustc_lexer::TokenKind::CloseBrace => R_CURLY,
ra_rustc_lexer::TokenKind::OpenBracket => L_BRACK,
ra_rustc_lexer::TokenKind::CloseBracket => R_BRACK,
ra_rustc_lexer::TokenKind::At => AT,
ra_rustc_lexer::TokenKind::Pound => POUND,
ra_rustc_lexer::TokenKind::Tilde => TILDE,
ra_rustc_lexer::TokenKind::Question => QUESTION,
ra_rustc_lexer::TokenKind::ColonColon => decompose!(COLON, COLON),
ra_rustc_lexer::TokenKind::Colon => COLON,
ra_rustc_lexer::TokenKind::Dollar => DOLLAR,
ra_rustc_lexer::TokenKind::EqEq => decompose!(EQ, EQ),
ra_rustc_lexer::TokenKind::Eq => EQ,
ra_rustc_lexer::TokenKind::FatArrow => decompose!(EQ, R_ANGLE),
ra_rustc_lexer::TokenKind::Ne => decompose!(EXCL, EQ),
ra_rustc_lexer::TokenKind::Not => EXCL,
ra_rustc_lexer::TokenKind::Le => decompose!(L_ANGLE, EQ),
ra_rustc_lexer::TokenKind::LArrow => decompose!(COLON, MINUS),
ra_rustc_lexer::TokenKind::Lt => L_ANGLE,
ra_rustc_lexer::TokenKind::ShlEq => decompose!(L_ANGLE, L_ANGLE, EQ),
ra_rustc_lexer::TokenKind::Shl => decompose!(L_ANGLE, L_ANGLE),
ra_rustc_lexer::TokenKind::Ge => decompose!(R_ANGLE, EQ),
ra_rustc_lexer::TokenKind::Gt => R_ANGLE,
ra_rustc_lexer::TokenKind::ShrEq => decompose!(R_ANGLE, R_ANGLE, EQ),
ra_rustc_lexer::TokenKind::Shr => decompose!(R_ANGLE, R_ANGLE),
ra_rustc_lexer::TokenKind::RArrow => decompose!(MINUS, R_ANGLE),
ra_rustc_lexer::TokenKind::Minus => MINUS,
ra_rustc_lexer::TokenKind::MinusEq => decompose!(MINUS, EQ),
ra_rustc_lexer::TokenKind::And => AMP,
ra_rustc_lexer::TokenKind::AndAnd => decompose!(AMP, AMP),
ra_rustc_lexer::TokenKind::AndEq => decompose!(AMP, EQ),
ra_rustc_lexer::TokenKind::Or => PIPE,
ra_rustc_lexer::TokenKind::OrOr => decompose!(PIPE, PIPE),
ra_rustc_lexer::TokenKind::OrEq => decompose!(PIPE, EQ),
ra_rustc_lexer::TokenKind::PlusEq => decompose!(PLUS, EQ),
ra_rustc_lexer::TokenKind::Plus => PLUS,
ra_rustc_lexer::TokenKind::StarEq => decompose!(STAR, EQ),
ra_rustc_lexer::TokenKind::Star => STAR,
ra_rustc_lexer::TokenKind::SlashEq => decompose!(SLASH, EQ),
ra_rustc_lexer::TokenKind::Slash => SLASH,
ra_rustc_lexer::TokenKind::CaretEq => decompose!(CARET, EQ),
ra_rustc_lexer::TokenKind::Caret => CARET,
ra_rustc_lexer::TokenKind::PercentEq => decompose!(PERCENT, EQ),
ra_rustc_lexer::TokenKind::Percent => PERCENT,
ra_rustc_lexer::TokenKind::Unknown => ERROR,
};
let token = Token { kind, len: TextUnit::from_usize(rustc_token.len) };
2017-12-31 17:54:33 +03:00
acc.push(token);
2019-07-22 17:47:33 +03:00
text = &text[rustc_token.len..];
2017-12-31 17:54:33 +03:00
}
acc
}
2018-07-29 15:16:07 +03:00
/// Get the next token from a string
2019-07-22 17:47:33 +03:00
fn next_token(text: &str) -> Token {
2017-12-29 23:33:04 +03:00
assert!(!text.is_empty());
let mut ptr = Ptr::new(text);
let c = ptr.bump().unwrap();
let kind = next_token_inner(c, &mut ptr);
let len = ptr.into_len();
Token { kind, len }
}
/// Classify the token whose first character `c` has already been consumed
/// from `ptr`; consumes any remaining characters of the token and returns
/// its `SyntaxKind`. The branch order below is significant.
fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
    if is_whitespace(c) {
        ptr.bump_while(is_whitespace);
        return WHITESPACE;
    }

    match c {
        '#' => {
            // `#!` shebang handling must run before `#` is treated as POUND.
            if scan_shebang(ptr) {
                return SHEBANG;
            }
        }
        '/' => {
            // `//` or `/*`; a lone `/` falls through to the punctuation path.
            if let Some(kind) = scan_comment(ptr) {
                return kind;
            }
        }
        _ => (),
    }
    // Prefixed literals (`b'..'`, `r"..."`, `br".."`, ...) start with an
    // ident character, so they must be excluded from identifier scanning.
    let ident_start = is_ident_start(c) && !is_string_literal_start(c, ptr.current(), ptr.nth(1));
    if ident_start {
        return scan_ident(c, ptr);
    }
    if is_dec_digit(c) {
        let kind = scan_number(c, ptr);
        scan_literal_suffix(ptr);
        return kind;
    }
    // One-byte tokens.
    if let Some(kind) = SyntaxKind::from_char(c) {
        return kind;
    }

    match c {
        // Possibly multi-byte tokens,
        // but we only produce single byte token now
        // T![...], T![..], T![..=], T![.]
        '.' => return T![.],
        // T![::] T![:]
        ':' => return T![:],
        // T![==] FATARROW T![=]
        '=' => return T![=],
        // T![!=] T![!]
        '!' => return T![!],
        // T![->] T![-]
        '-' => return T![-],

        // If the character is an ident start not followed by another single
        // quote, then this is a lifetime name:
        '\'' => {
            return if ptr.at_p(is_ident_start) && !ptr.at_str("''") {
                ptr.bump();
                while ptr.at_p(is_ident_continue) {
                    ptr.bump();
                }
                // lifetimes shouldn't end with a single quote
                // if we find one, then this is an invalid character literal
                if ptr.at('\'') {
                    ptr.bump();
                    return CHAR;
                }
                LIFETIME
            } else {
                scan_char(ptr);
                scan_literal_suffix(ptr);
                CHAR
            };
        }
        // Byte char / byte string: `b'x'`, `b"..."` (rejected as an ident above).
        'b' => {
            let kind = scan_byte_char_or_string(ptr);
            scan_literal_suffix(ptr);
            return kind;
        }
        '"' => {
            scan_string(ptr);
            scan_literal_suffix(ptr);
            return STRING;
        }
        // Reached only when `r` did not start an identifier, i.e. a raw string.
        'r' => {
            scan_raw_string(ptr);
            scan_literal_suffix(ptr);
            return RAW_STRING;
        }
        _ => (),
    }
    ERROR
}
2017-12-30 15:22:40 +03:00
fn scan_ident(c: char, ptr: &mut Ptr) -> SyntaxKind {
2019-01-23 13:55:31 -05:00
let is_raw = match (c, ptr.current()) {
2019-01-23 12:15:47 -05:00
('r', Some('#')) => {
ptr.bump();
2019-01-23 13:55:31 -05:00
true
2019-01-23 12:15:47 -05:00
}
2019-05-15 15:35:47 +03:00
('_', None) => return T![_],
('_', Some(c)) if !is_ident_continue(c) => return T![_],
2019-01-23 13:55:31 -05:00
_ => false,
};
2017-12-30 15:22:40 +03:00
ptr.bump_while(is_ident_continue);
2019-01-23 13:55:31 -05:00
if !is_raw {
if let Some(kind) = SyntaxKind::from_keyword(ptr.current_token_text()) {
return kind;
}
2018-01-01 18:58:46 +03:00
}
IDENT
2017-12-30 15:22:40 +03:00
}
2017-12-31 10:41:42 +03:00
/// Consume a literal suffix (e.g. the `u8` in `1u8`) if one follows:
/// one ident-start character, then any ident-continue characters.
fn scan_literal_suffix(ptr: &mut Ptr) {
    let has_suffix_head = ptr.at_p(is_ident_start);
    if has_suffix_head {
        ptr.bump();
    }
    ptr.bump_while(is_ident_continue);
}
/// Lex `text` as a single token; return it only when it is a literal that
/// spans the entire input, otherwise `None`.
pub fn classify_literal(text: &str) -> Option<Token> {
    let token = next_token(text);
    let is_whole_literal = token.kind.is_literal() && token.len.to_usize() == text.len();
    if is_whole_literal {
        Some(token)
    } else {
        None
    }
}