//! Lexer for Rust source text: produces a flat stream of `Token`s by
//! delegating to `rustc_lexer` and mapping its token kinds onto `SyntaxKind`.

use crate::{
    SyntaxKind::{self, *},
    TextUnit,
};

/// A token of Rust source.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
    /// The kind of token.
    pub kind: SyntaxKind,
    /// The length of the token.
    pub len: TextUnit,
}

fn match_literal_kind(kind: rustc_lexer::LiteralKind) -> SyntaxKind {
    match kind {
        rustc_lexer::LiteralKind::Int { .. } => INT_NUMBER,
        rustc_lexer::LiteralKind::Float { .. } => FLOAT_NUMBER,
        rustc_lexer::LiteralKind::Char { .. } => CHAR,
        rustc_lexer::LiteralKind::Byte { .. } => BYTE,
        rustc_lexer::LiteralKind::Str { .. } => STRING,
        rustc_lexer::LiteralKind::ByteStr { .. } => BYTE_STRING,
        rustc_lexer::LiteralKind::RawStr { .. } => RAW_STRING,
        rustc_lexer::LiteralKind::RawByteStr { .. } => RAW_BYTE_STRING,
    }
}

/// Break a string up into its component tokens
pub fn tokenize(text: &str) -> Vec<Token> {
    if text.is_empty() {
        return vec![];
    }

    let mut text = text;
    let mut acc = Vec::new();
    if let Some(len) = rustc_lexer::strip_shebang(text) {
        acc.push(Token { kind: SHEBANG, len: TextUnit::from_usize(len) });
        text = &text[len..];
    }
    while !text.is_empty() {
        let rustc_token = rustc_lexer::first_token(text);
        let kind = match rustc_token.kind {
            rustc_lexer::TokenKind::LineComment => COMMENT,
            rustc_lexer::TokenKind::BlockComment { .. } => COMMENT,
            rustc_lexer::TokenKind::Whitespace => WHITESPACE,
            rustc_lexer::TokenKind::Ident => {
                let token_text = &text[..rustc_token.len];
                if token_text == "_" {
                    UNDERSCORE
                } else {
                    SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
                }
            }
            rustc_lexer::TokenKind::RawIdent => IDENT,
            rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
            rustc_lexer::TokenKind::Lifetime { .. } => LIFETIME,
            rustc_lexer::TokenKind::Semi => SEMI,
            rustc_lexer::TokenKind::Comma => COMMA,
            rustc_lexer::TokenKind::Dot => DOT,
            rustc_lexer::TokenKind::OpenParen => L_PAREN,
            rustc_lexer::TokenKind::CloseParen => R_PAREN,
            rustc_lexer::TokenKind::OpenBrace => L_CURLY,
            rustc_lexer::TokenKind::CloseBrace => R_CURLY,
            rustc_lexer::TokenKind::OpenBracket => L_BRACK,
            rustc_lexer::TokenKind::CloseBracket => R_BRACK,
            rustc_lexer::TokenKind::At => AT,
            rustc_lexer::TokenKind::Pound => POUND,
            rustc_lexer::TokenKind::Tilde => TILDE,
            rustc_lexer::TokenKind::Question => QUESTION,
            rustc_lexer::TokenKind::Colon => COLON,
            rustc_lexer::TokenKind::Dollar => DOLLAR,
            rustc_lexer::TokenKind::Eq => EQ,
            rustc_lexer::TokenKind::Not => EXCL,
            rustc_lexer::TokenKind::Lt => L_ANGLE,
            rustc_lexer::TokenKind::Gt => R_ANGLE,
            rustc_lexer::TokenKind::Minus => MINUS,
            rustc_lexer::TokenKind::And => AMP,
            rustc_lexer::TokenKind::Or => PIPE,
            rustc_lexer::TokenKind::Plus => PLUS,
            rustc_lexer::TokenKind::Star => STAR,
            rustc_lexer::TokenKind::Slash => SLASH,
            rustc_lexer::TokenKind::Caret => CARET,
            rustc_lexer::TokenKind::Percent => PERCENT,
            rustc_lexer::TokenKind::Unknown => ERROR,
        };
        let token = Token { kind, len: TextUnit::from_usize(rustc_token.len) };
        acc.push(token);
        text = &text[rustc_token.len..];
    }
    acc
}
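
// Usage sketch added for illustration (not part of the original file): a
// `#[cfg(test)]` check that `tokenize` covers the whole input, i.e. the token
// lengths sum back to the length of the source text. `FN_KW` is assumed to be
// the keyword kind that `SyntaxKind::from_keyword("fn")` produces.
#[cfg(test)]
#[test]
fn tokenize_covers_whole_input() {
    let text = "fn main() {}";
    let tokens = tokenize(text);
    let total: usize = tokens.iter().map(|t| t.len.to_usize()).sum();
    assert_eq!(total, text.len());
    assert_eq!(tokens.first().map(|t| t.kind), Some(FN_KW));
}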

/// Classifies `text` as a single literal token, returning `None` unless the
/// whole string is exactly one literal.
pub fn classify_literal(text: &str) -> Option<Token> {
    let t = rustc_lexer::first_token(text);
    if t.len != text.len() {
        return None;
    }
    let kind = match t.kind {
        rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
        _ => return None,
    };
    Some(Token { kind, len: TextUnit::from_usize(t.len) })
}
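
// Illustrative check (not part of the original file): `classify_literal`
// accepts the input only when it is exactly one literal token, so trailing
// characters or non-literal tokens yield `None`.
#[cfg(test)]
#[test]
fn classify_literal_requires_a_single_literal() {
    assert_eq!(classify_literal("92").map(|t| t.kind), Some(INT_NUMBER));
    assert!(classify_literal("92 ").is_none()); // trailing whitespace
    assert!(classify_literal("foo").is_none()); // identifier, not a literal
}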