2020-01-24 03:39:23 +02:00
|
|
|
//! Lexer analyzes raw input string and produces lexemes (tokens).
|
2020-01-26 20:44:49 +02:00
|
|
|
//! It is just a bridge to `rustc_lexer`.
|
2019-09-30 11:58:53 +03:00
|
|
|
|
2018-10-15 19:55:32 +03:00
|
|
|
use crate::{
|
2020-01-26 20:44:49 +02:00
|
|
|
SyntaxError, SyntaxErrorKind,
|
2018-07-29 15:16:07 +03:00
|
|
|
SyntaxKind::{self, *},
|
2020-01-26 20:44:49 +02:00
|
|
|
TextRange, TextUnit,
|
2018-07-29 15:16:07 +03:00
|
|
|
};
|
2017-12-31 13:32:00 +03:00
|
|
|
|
2018-07-29 15:16:07 +03:00
|
|
|
/// A token of Rust source.
///
/// A token carries only its kind and length — no text and no absolute
/// position; callers recover offsets by accumulating `len` over the
/// token stream (see `tokenize`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
    /// The kind of token.
    pub kind: SyntaxKind,
    /// The length of the token.
    pub len: TextUnit,
}
|
2017-12-31 16:42:22 +03:00
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
/// Break a string up into its component tokens.
|
|
|
|
/// Beware that it checks for shebang first and its length contributes to resulting
|
|
|
|
/// tokens offsets.
|
2020-01-28 07:09:13 +02:00
|
|
|
pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
|
2020-01-26 20:44:49 +02:00
|
|
|
// non-empty string is a precondtion of `rustc_lexer::strip_shebang()`.
|
|
|
|
if text.is_empty() {
|
2020-01-28 07:09:13 +02:00
|
|
|
return Default::default();
|
2020-01-26 20:44:49 +02:00
|
|
|
}
|
2020-01-24 03:39:23 +02:00
|
|
|
|
2020-01-28 07:09:13 +02:00
|
|
|
let mut tokens = Vec::new();
|
|
|
|
let mut errors = Vec::new();
|
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
let mut offset: usize = rustc_lexer::strip_shebang(text)
|
|
|
|
.map(|shebang_len| {
|
2020-01-28 07:09:13 +02:00
|
|
|
tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
|
2020-01-26 20:44:49 +02:00
|
|
|
shebang_len
|
|
|
|
})
|
|
|
|
.unwrap_or(0);
|
2020-01-24 03:39:23 +02:00
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
let text_without_shebang = &text[offset..];
|
2020-01-24 03:39:23 +02:00
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
|
2020-01-28 07:09:13 +02:00
|
|
|
let token_len = TextUnit::from_usize(rustc_token.len);
|
|
|
|
let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
|
|
|
|
|
|
|
|
let (syntax_kind, error) =
|
|
|
|
rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
|
|
|
|
|
|
|
|
tokens.push(Token { kind: syntax_kind, len: token_len });
|
|
|
|
|
|
|
|
if let Some(error) = error {
|
|
|
|
errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range));
|
|
|
|
}
|
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
offset += rustc_token.len;
|
|
|
|
}
|
2020-01-28 07:09:13 +02:00
|
|
|
|
|
|
|
(tokens, errors)
|
2020-01-24 03:39:23 +02:00
|
|
|
}
|
|
|
|
|
2020-01-28 07:09:13 +02:00
|
|
|
/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
|
|
|
|
/// encountered at the beginning of the string.
|
|
|
|
///
|
|
|
|
/// Returns `None` if the string contains zero *or two or more* tokens.
|
|
|
|
/// The token is malformed if the returned error is not `None`.
|
|
|
|
///
|
|
|
|
/// Beware that unescape errors are not checked at tokenization time.
|
|
|
|
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
|
2020-01-28 07:18:35 +02:00
|
|
|
lex_first_token(text)
|
2020-01-28 07:09:13 +02:00
|
|
|
.filter(|(token, _)| token.len.to_usize() == text.len())
|
|
|
|
.map(|(token, error)| (token.kind, error))
|
|
|
|
}
|
|
|
|
|
2020-01-28 07:13:18 +02:00
|
|
|
/// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
|
2020-01-28 07:09:13 +02:00
|
|
|
/// returns `None` if any tokenization error occured.
|
2020-01-26 20:44:49 +02:00
|
|
|
///
|
2020-01-28 07:09:13 +02:00
|
|
|
/// Beware that unescape errors are not checked at tokenization time.
|
|
|
|
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
|
2020-01-28 07:18:35 +02:00
|
|
|
lex_first_token(text)
|
2020-01-28 07:09:13 +02:00
|
|
|
.filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
|
|
|
|
.map(|(token, _error)| token.kind)
|
2020-01-24 03:39:23 +02:00
|
|
|
}
|
|
|
|
|
2020-01-28 07:13:18 +02:00
|
|
|
/// Returns the first token (with a `SyntaxKind` and an `Option<SyntaxError>`)
/// encountered at the beginning of the string.
///
/// Returns `None` only if the string contains zero tokens (i.e. is empty);
/// a malformed token is still returned, paired with a non-`None` error.
/// The token is malformed if the returned error is not `None`.
///
/// Beware that unescape errors are not checked at tokenization time.
fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
    // non-empty string is a precondition of `rustc_lexer::first_token()`.
    if text.is_empty() {
        return None;
    }

    let rustc_token = rustc_lexer::first_token(text);
    let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);

    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
    // The error range spans the whole input slice: only the first token was
    // lexed, so any failure pertains to it.
    let error = error.map(|error| {
        SyntaxError::new(
            SyntaxErrorKind::TokenizeError(error),
            TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)),
        )
    });

    Some((token, error))
}
|
|
|
|
|
2020-01-28 07:09:13 +02:00
|
|
|
// FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad advice:
|
|
|
|
// https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067
|
|
|
|
|
2020-01-26 20:44:49 +02:00
|
|
|
/// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
/// It describes all the types of errors that may happen during the tokenization
/// of Rust source.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TokenizeError {
    /// Base prefix was provided, but there were no digits
    /// after it, e.g. `0x`, `0b`.
    EmptyInt,
    /// Float exponent lacks digits e.g. `12.34e+`, `12.3E+`, `12e-`, `1_E-`,
    EmptyExponent,

    /// Block comment lacks trailing delimiter `*/`
    UnterminatedBlockComment,
    /// Character literal lacks trailing delimiter `'`
    UnterminatedChar,
    /// Characterish byte literal lacks trailing delimiter `'`
    UnterminatedByte,
    /// String literal lacks trailing delimiter `"`
    UnterminatedString,
    /// Byte string literal lacks trailing delimiter `"`
    UnterminatedByteString,
    /// Raw string literal lacks trailing delimiter e.g. `"##`
    UnterminatedRawString,
    /// Raw byte string literal lacks trailing delimiter e.g. `"##`
    UnterminatedRawByteString,

    /// Raw string lacks a quote after the pound characters e.g. `r###`
    UnstartedRawString,
    /// Raw byte string lacks a quote after the pound characters e.g. `br###`
    UnstartedRawByteString,

    /// Lifetime starts with a number e.g. `'4ever`
    LifetimeStartsWithNumber,
}
|
|
|
|
|
2020-01-28 07:09:13 +02:00
|
|
|
/// Converts a `rustc_lexer::TokenKind` (plus the token's text, needed to
/// distinguish keywords and `_` from plain identifiers) into this crate's
/// `SyntaxKind`, together with an optional `TokenizeError` for malformed tokens.
fn rustc_token_kind_to_syntax_kind(
    rustc_token_kind: &rustc_lexer::TokenKind,
    token_text: &str,
) -> (SyntaxKind, Option<TokenizeError>) {
    // A note on an intended tradeoff:
    // We drop some useful information here (see patterns with double dots `..`)
    // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
    // being `u16` that come from `rowan::SyntaxKind`.

    let syntax_kind = {
        use rustc_lexer::TokenKind as TK;
        use TokenizeError as TE;

        match rustc_token_kind {
            TK::LineComment => COMMENT,

            TK::BlockComment { terminated: true } => COMMENT,
            TK::BlockComment { terminated: false } => {
                return (COMMENT, Some(TE::UnterminatedBlockComment));
            }

            TK::Whitespace => WHITESPACE,

            TK::Ident => {
                // `_` gets its own token kind; otherwise check the keyword table
                // before falling back to a plain identifier.
                if token_text == "_" {
                    UNDERSCORE
                } else {
                    SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
                }
            }

            TK::RawIdent => IDENT,
            TK::Literal { kind, .. } => return match_literal_kind(&kind),

            TK::Lifetime { starts_with_number: false } => LIFETIME,
            TK::Lifetime { starts_with_number: true } => {
                return (LIFETIME, Some(TE::LifetimeStartsWithNumber))
            }

            TK::Semi => SEMI,
            TK::Comma => COMMA,
            TK::Dot => DOT,
            TK::OpenParen => L_PAREN,
            TK::CloseParen => R_PAREN,
            TK::OpenBrace => L_CURLY,
            TK::CloseBrace => R_CURLY,
            TK::OpenBracket => L_BRACK,
            TK::CloseBracket => R_BRACK,
            TK::At => AT,
            TK::Pound => POUND,
            TK::Tilde => TILDE,
            TK::Question => QUESTION,
            TK::Colon => COLON,
            TK::Dollar => DOLLAR,
            TK::Eq => EQ,
            TK::Not => EXCL,
            TK::Lt => L_ANGLE,
            TK::Gt => R_ANGLE,
            TK::Minus => MINUS,
            TK::And => AMP,
            TK::Or => PIPE,
            TK::Plus => PLUS,
            TK::Star => STAR,
            TK::Slash => SLASH,
            TK::Caret => CARET,
            TK::Percent => PERCENT,
            TK::Unknown => ERROR,
        }
    };

    return (syntax_kind, None);

    // Maps a `rustc_lexer` literal kind to a `SyntaxKind`, attaching an error
    // for literals that are lexically incomplete (unterminated, empty, etc.).
    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) {
        use rustc_lexer::LiteralKind as LK;
        use TokenizeError as TE;

        #[rustfmt::skip]
        let syntax_kind = match *kind {
            LK::Int { empty_int: false, .. } => INT_NUMBER,
            LK::Int { empty_int: true, .. } => {
                return (INT_NUMBER, Some(TE::EmptyInt))
            }

            LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER,
            LK::Float { empty_exponent: true, .. } => {
                return (FLOAT_NUMBER, Some(TE::EmptyExponent))
            }

            LK::Char { terminated: true } => CHAR,
            LK::Char { terminated: false } => {
                return (CHAR, Some(TE::UnterminatedChar))
            }

            LK::Byte { terminated: true } => BYTE,
            LK::Byte { terminated: false } => {
                return (BYTE, Some(TE::UnterminatedByte))
            }

            LK::Str { terminated: true } => STRING,
            LK::Str { terminated: false } => {
                return (STRING, Some(TE::UnterminatedString))
            }


            LK::ByteStr { terminated: true } => BYTE_STRING,
            LK::ByteStr { terminated: false } => {
                return (BYTE_STRING, Some(TE::UnterminatedByteString))
            }

            LK::RawStr { started: true, terminated: true, .. } => RAW_STRING,
            LK::RawStr { started: true, terminated: false, .. } => {
                return (RAW_STRING, Some(TE::UnterminatedRawString))
            }
            LK::RawStr { started: false, .. } => {
                return (RAW_STRING, Some(TE::UnstartedRawString))
            }

            LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING,
            LK::RawByteStr { started: true, terminated: false, .. } => {
                return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString))
            }
            LK::RawByteStr { started: false, .. } => {
                return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString))
            }
        };

        (syntax_kind, None)
    }
}
|