ra_syntax: refactored the lexer design as per @matklad and @kiljacken PR review

Veetaha 2020-01-28 07:09:13 +02:00
parent bf60661aa3
commit 9e7eaa959f
9 changed files with 202 additions and 181 deletions

View File

@@ -2,7 +2,9 @@
use hir::ModuleSource;
use ra_db::{RelativePath, RelativePathBuf, SourceDatabase, SourceDatabaseExt};
use ra_syntax::{algo::find_node_at_offset, ast, single_token, AstNode, SyntaxKind, SyntaxNode};
use ra_syntax::{
algo::find_node_at_offset, ast, lex_single_valid_syntax_kind, AstNode, SyntaxKind, SyntaxNode,
};
use ra_text_edit::TextEdit;
use crate::{
@@ -17,7 +19,7 @@ pub(crate) fn rename(
position: FilePosition,
new_name: &str,
) -> Option<RangeInfo<SourceChange>> {
match single_token(new_name)?.token.kind {
match lex_single_valid_syntax_kind(new_name)? {
SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => (),
_ => return None,
}
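As a minimal illustration (not part of this commit; the helper name is hypothetical), the new entry point lets the rename handler validate a candidate name in one call:

fn is_valid_new_name(new_name: &str) -> bool {
    use ra_syntax::{lex_single_valid_syntax_kind, SyntaxKind};
    // `lex_single_valid_syntax_kind` returns `Some(kind)` only when the whole
    // string lexes to exactly one error-free token, so multi-token strings
    // like "foo bar" yield `None`, and non-identifier kinds hit the `_` arm.
    match lex_single_valid_syntax_kind(new_name) {
        Some(SyntaxKind::IDENT) | Some(SyntaxKind::UNDERSCORE) => true,
        _ => false,
    }
}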

View File

@@ -1,7 +1,7 @@
//! FIXME: write short doc here
use ra_parser::{Token, TokenSource};
use ra_syntax::{single_token, SmolStr, SyntaxKind, SyntaxKind::*, T};
use ra_syntax::{lex_single_valid_syntax_kind, SmolStr, SyntaxKind, SyntaxKind::*, T};
use std::cell::{Cell, Ref, RefCell};
use tt::buffer::{Cursor, TokenBuffer};
@@ -129,8 +129,7 @@ fn convert_delim(d: Option<tt::DelimiterKind>, closing: bool) -> TtToken {
}
fn convert_literal(l: &tt::Literal) -> TtToken {
let kind = single_token(&l.text)
.map(|parsed| parsed.token.kind)
let kind = lex_single_valid_syntax_kind(&l.text)
.filter(|kind| kind.is_literal())
.unwrap_or_else(|| match l.text.as_ref() {
"true" => T![true],

View File

@@ -41,7 +41,9 @@ use crate::syntax_node::GreenNode;
pub use crate::{
algo::InsertPosition,
ast::{AstNode, AstToken},
parsing::{first_token, single_token, tokenize, tokenize_append, Token, TokenizeError},
parsing::{
lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, Token, TokenizeError,
},
ptr::{AstPtr, SyntaxNodePtr},
syntax_error::{Location, SyntaxError, SyntaxErrorKind},
syntax_node::{

View File

@@ -15,9 +15,15 @@ pub use lexer::*;
pub(crate) use self::reparsing::incremental_reparse;
pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) {
let ParsedTokens { tokens, errors } = tokenize(&text);
let (tokens, lexer_errors) = tokenize(&text);
let mut token_source = TextTokenSource::new(text, &tokens);
let mut tree_sink = TextTreeSink::new(text, &tokens, errors);
let mut tree_sink = TextTreeSink::new(text, &tokens);
ra_parser::parse(&mut token_source, &mut tree_sink);
tree_sink.finish()
let (tree, mut parser_errors) = tree_sink.finish();
parser_errors.extend(lexer_errors);
(tree, parser_errors)
}
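A hypothetical test (not in the commit) pinning down the new contract: tokenization errors now come out of `parse_text` together with parser errors, instead of being threaded through the tree sink.

#[test]
fn lexer_errors_are_part_of_parse_errors() {
    // The unterminated string literal is caught by the lexer, not the
    // parser; it must still show up in the combined error list.
    let (_green, errors) = parse_text(r#"fn f() { "unterminated }"#);
    assert!(!errors.is_empty());
}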

View File

@@ -16,55 +16,21 @@ pub struct Token {
pub len: TextUnit,
}
/// Represents the result of parsing one token. Beware that the token may be malformed.
#[derive(Debug)]
pub struct ParsedToken {
/// Parsed token.
pub token: Token,
/// If error is present then parsed token is malformed.
pub error: Option<SyntaxError>,
}
#[derive(Debug, Default)]
/// Represents the result of parsing Rust source code.
pub struct ParsedTokens {
/// Parsed tokens in order they appear in source code.
pub tokens: Vec<Token>,
/// Collection of all tokenization errors that occurred.
/// In general `self.errors.len() <= self.tokens.len()`
pub errors: Vec<SyntaxError>,
}
impl ParsedTokens {
/// Append `token` and `error` (if present) to the result.
pub fn push(&mut self, ParsedToken { token, error }: ParsedToken) {
self.tokens.push(token);
if let Some(error) = error {
self.errors.push(error)
}
}
}
/// Same as `tokenize_append()`, just a shortcut for creating `ParsedTokens`
/// and returning the result the usual way.
pub fn tokenize(text: &str) -> ParsedTokens {
let mut parsed = ParsedTokens::default();
tokenize_append(text, &mut parsed);
parsed
}
/// Break a string up into its component tokens.
/// Writes to `ParsedTokens`, which is basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
/// Beware that it checks for a shebang first, and its length contributes to the
/// resulting token offsets.
pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
// non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
if text.is_empty() {
return;
return Default::default();
}
let mut tokens = Vec::new();
let mut errors = Vec::new();
let mut offset: usize = rustc_lexer::strip_shebang(text)
.map(|shebang_len| {
parsed.tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
shebang_len
})
.unwrap_or(0);
@@ -72,35 +38,76 @@ pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
let text_without_shebang = &text[offset..];
for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
parsed.push(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from_usize(offset)));
let token_len = TextUnit::from_usize(rustc_token.len);
let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
let (syntax_kind, error) =
rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
tokens.push(Token { kind: syntax_kind, len: token_len });
if let Some(error) = error {
errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range));
}
offset += rustc_token.len;
}
(tokens, errors)
}
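Usage sketch for the new tuple-returning signature (assertions are assumptions inferred from the code above, not tests from the commit):

let text = "let x = 92;";
let (tokens, errors) = tokenize(text);
// Tokens tile the input: their lengths sum to the input length.
let covered: usize = tokens.iter().map(|token| token.len.to_usize()).sum();
assert_eq!(covered, text.len());
// Well-formed input produces no tokenization errors.
assert!(errors.is_empty());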
/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
/// encountered at the beginning of the string.
///
/// Returns `None` if the string contains zero *or two or more* tokens.
/// The token is malformed if the returned error is not `None`.
///
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
first_token(text)
.filter(|(token, _)| token.len.to_usize() == text.len())
.map(|(token, error)| (token.kind, error))
}
/// The same as `lex_single_syntax_kind()`, but returns only the `SyntaxKind` and
/// returns `None` if any tokenization error occurred.
///
/// Beware that unescape errors are not checked at tokenization time.
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
first_token(text)
.filter(|(token, error)| error.is_none() && token.len.to_usize() == text.len())
.map(|(token, _error)| token.kind)
}
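Sketch contrasting the two entry points, with expected results inferred from the code above (not committed tests):

// One well-formed token: the `valid` variant returns the kind directly.
assert_eq!(lex_single_valid_syntax_kind("foo"), Some(IDENT));
// More than one token: both helpers return `None`.
assert!(lex_single_syntax_kind("foo bar").is_none());
// One malformed token: `lex_single_syntax_kind` still reports the kind
// alongside the error, while the `valid` variant gives up.
let (kind, error) = lex_single_syntax_kind("\"unterminated").unwrap();
assert_eq!(kind, STRING);
assert!(error.is_some());
assert_eq!(lex_single_valid_syntax_kind("\"unterminated"), None);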
/// Returns the first encountered token at the beginning of the string.
/// If the string contains zero or *two or more* tokens, returns `None`.
///
/// The main difference between `first_token()` and `single_token()` is that
/// the latter returns `None` if the string contains more than one token.
pub fn single_token(text: &str) -> Option<ParsedToken> {
first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
}
/// Returns the first encountered token at the beginning of the string.
/// If the string contains zero tokens returns `None`.
/// Returns `None` if the string contains zero tokens or if the token was parsed
/// with an error.
///
/// The main difference between `first_token()` and `single_token()` is that
/// the latter returns `None` if the string contains more than one token.
pub fn first_token(text: &str) -> Option<ParsedToken> {
/// Beware that unescape errors are not checked at tokenization time.
fn first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
// non-empty string is a precondition of `rustc_lexer::first_token()`.
if text.is_empty() {
None
} else {
let rustc_token = rustc_lexer::first_token(text);
Some(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from(0)))
return None;
}
let rustc_token = rustc_lexer::first_token(text);
let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
let error = error.map(|error| {
SyntaxError::new(
SyntaxErrorKind::TokenizeError(error),
TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)),
)
});
Some((token, error))
}
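Contract sketch for this private helper (assumed behavior, not committed tests): only the first token is consumed, and empty input is rejected up front.

let (token, error) = first_token("foo bar").unwrap();
// Only `foo` is consumed; `len` tells the caller where to continue.
assert_eq!(token.kind, IDENT);
assert_eq!(token.len.to_usize(), 3);
assert!(error.is_none());
// The explicit emptiness check keeps `rustc_lexer::first_token`'s
// non-empty-input precondition satisfied.
assert!(first_token("").is_none());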
// FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad's advice:
// https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067
/// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
/// It describes all the types of errors that may happen during the tokenization
/// of Rust source.
@@ -136,122 +143,132 @@ pub enum TokenizeError {
LifetimeStartsWithNumber,
}
/// Mapper function that converts `rustc_lexer::Token` with some additional context
/// to `ParsedToken`
fn rustc_token_to_parsed_token(
rustc_token: &rustc_lexer::Token,
text: &str,
token_start_offset: TextUnit,
) -> ParsedToken {
fn rustc_token_kind_to_syntax_kind(
rustc_token_kind: &rustc_lexer::TokenKind,
token_text: &str,
) -> (SyntaxKind, Option<TokenizeError>) {
// A note on an intended tradeoff:
// We drop some useful information here (see patterns with double dots `..`)
// Storing that info in `SyntaxKind` is not possible due to its layout requirements of
// being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
// would mean hell of a rewrite
// being `u16` that come from `rowan::SyntaxKind`.
let token_range =
TextRange::offset_len(token_start_offset, TextUnit::from_usize(rustc_token.len));
let token_text = &text[token_range];
let (syntax_kind, error) = {
let syntax_kind = {
use rustc_lexer::TokenKind as TK;
use TokenizeError as TE;
match rustc_token.kind {
TK::LineComment => ok(COMMENT),
TK::BlockComment { terminated } => {
ok_if(terminated, COMMENT, TE::UnterminatedBlockComment)
match rustc_token_kind {
TK::LineComment => COMMENT,
TK::BlockComment { terminated: true } => COMMENT,
TK::BlockComment { terminated: false } => {
return (COMMENT, Some(TE::UnterminatedBlockComment));
}
TK::Whitespace => ok(WHITESPACE),
TK::Ident => ok(if token_text == "_" {
UNDERSCORE
} else {
SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
}),
TK::RawIdent => ok(IDENT),
TK::Literal { kind, .. } => match_literal_kind(&kind),
TK::Lifetime { starts_with_number } => {
ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
TK::Whitespace => WHITESPACE,
TK::Ident => {
if token_text == "_" {
UNDERSCORE
} else {
SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
}
}
TK::Semi => ok(SEMI),
TK::Comma => ok(COMMA),
TK::Dot => ok(DOT),
TK::OpenParen => ok(L_PAREN),
TK::CloseParen => ok(R_PAREN),
TK::OpenBrace => ok(L_CURLY),
TK::CloseBrace => ok(R_CURLY),
TK::OpenBracket => ok(L_BRACK),
TK::CloseBracket => ok(R_BRACK),
TK::At => ok(AT),
TK::Pound => ok(POUND),
TK::Tilde => ok(TILDE),
TK::Question => ok(QUESTION),
TK::Colon => ok(COLON),
TK::Dollar => ok(DOLLAR),
TK::Eq => ok(EQ),
TK::Not => ok(EXCL),
TK::Lt => ok(L_ANGLE),
TK::Gt => ok(R_ANGLE),
TK::Minus => ok(MINUS),
TK::And => ok(AMP),
TK::Or => ok(PIPE),
TK::Plus => ok(PLUS),
TK::Star => ok(STAR),
TK::Slash => ok(SLASH),
TK::Caret => ok(CARET),
TK::Percent => ok(PERCENT),
TK::Unknown => ok(ERROR),
TK::RawIdent => IDENT,
TK::Literal { kind, .. } => return match_literal_kind(&kind),
TK::Lifetime { starts_with_number: false } => LIFETIME,
TK::Lifetime { starts_with_number: true } => {
return (LIFETIME, Some(TE::LifetimeStartsWithNumber))
}
TK::Semi => SEMI,
TK::Comma => COMMA,
TK::Dot => DOT,
TK::OpenParen => L_PAREN,
TK::CloseParen => R_PAREN,
TK::OpenBrace => L_CURLY,
TK::CloseBrace => R_CURLY,
TK::OpenBracket => L_BRACK,
TK::CloseBracket => R_BRACK,
TK::At => AT,
TK::Pound => POUND,
TK::Tilde => TILDE,
TK::Question => QUESTION,
TK::Colon => COLON,
TK::Dollar => DOLLAR,
TK::Eq => EQ,
TK::Not => EXCL,
TK::Lt => L_ANGLE,
TK::Gt => R_ANGLE,
TK::Minus => MINUS,
TK::And => AMP,
TK::Or => PIPE,
TK::Plus => PLUS,
TK::Star => STAR,
TK::Slash => SLASH,
TK::Caret => CARET,
TK::Percent => PERCENT,
TK::Unknown => ERROR,
}
};
return ParsedToken {
token: Token { kind: syntax_kind, len: token_range.len() },
error: error
.map(|error| SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)),
};
return (syntax_kind, None);
type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) {
use rustc_lexer::LiteralKind as LK;
use TokenizeError as TE;
match *kind {
LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
LK::Float { empty_exponent, .. } => {
ok_if(!empty_exponent, FLOAT_NUMBER, TE::EmptyExponent)
}
LK::Char { terminated } => ok_if(terminated, CHAR, TE::UnterminatedChar),
LK::Byte { terminated } => ok_if(terminated, BYTE, TE::UnterminatedByte),
LK::Str { terminated } => ok_if(terminated, STRING, TE::UnterminatedString),
LK::ByteStr { terminated } => {
ok_if(terminated, BYTE_STRING, TE::UnterminatedByteString)
#[rustfmt::skip]
let syntax_kind = match *kind {
LK::Int { empty_int: false, .. } => INT_NUMBER,
LK::Int { empty_int: true, .. } => {
return (INT_NUMBER, Some(TE::EmptyInt))
}
LK::RawStr { started: true, terminated, .. } => {
ok_if(terminated, RAW_STRING, TE::UnterminatedRawString)
LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER,
LK::Float { empty_exponent: true, .. } => {
return (FLOAT_NUMBER, Some(TE::EmptyExponent))
}
LK::RawStr { started: false, .. } => err(RAW_STRING, TE::UnstartedRawString),
LK::RawByteStr { started: true, terminated, .. } => {
ok_if(terminated, RAW_BYTE_STRING, TE::UnterminatedRawByteString)
LK::Char { terminated: true } => CHAR,
LK::Char { terminated: false } => {
return (CHAR, Some(TE::UnterminatedChar))
}
LK::Byte { terminated: true } => BYTE,
LK::Byte { terminated: false } => {
return (BYTE, Some(TE::UnterminatedByte))
}
LK::Str { terminated: true } => STRING,
LK::Str { terminated: false } => {
return (STRING, Some(TE::UnterminatedString))
}
LK::ByteStr { terminated: true } => BYTE_STRING,
LK::ByteStr { terminated: false } => {
return (BYTE_STRING, Some(TE::UnterminatedByteString))
}
LK::RawStr { started: true, terminated: true, .. } => RAW_STRING,
LK::RawStr { started: true, terminated: false, .. } => {
return (RAW_STRING, Some(TE::UnterminatedRawString))
}
LK::RawStr { started: false, .. } => {
return (RAW_STRING, Some(TE::UnstartedRawString))
}
LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING,
LK::RawByteStr { started: true, terminated: false, .. } => {
return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString))
}
LK::RawByteStr { started: false, .. } => {
err(RAW_BYTE_STRING, TE::UnstartedRawByteString)
return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString))
}
}
}
const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
};
(syntax_kind, None)
}
const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
(syntax_kind, Some(error))
}
fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
if cond {
ok(syntax_kind)
} else {
err(syntax_kind, error)
}
}
}
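The shape of the new conversion contract, sketched (a module-internal illustration under assumptions from the diff, not from the commit): a best-effort `SyntaxKind` is always produced, and the error travels on a separate optional channel.

use rustc_lexer::TokenKind as TK;
// Well-formed token: kind only.
let (kind, error) = rustc_token_kind_to_syntax_kind(&TK::Whitespace, " ");
assert_eq!(kind, WHITESPACE);
assert!(error.is_none());
// Malformed token: the kind is still classified, plus a TokenizeError.
let (kind, error) =
    rustc_token_kind_to_syntax_kind(&TK::BlockComment { terminated: false }, "/* ...");
assert_eq!(kind, COMMENT);
assert!(error.is_some()); // TokenizeError::UnterminatedBlockComment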

View File

@@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit;
use crate::{
algo,
parsing::{
lexer::{single_token, tokenize, ParsedTokens, Token},
lexer::{lex_single_syntax_kind, tokenize, Token},
text_token_source::TextTokenSource,
text_tree_sink::TextTreeSink,
},
@@ -54,7 +54,7 @@ fn reparse_token<'node>(
}
let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit);
let new_token_kind = single_token(&new_text)?.token.kind;
let (new_token_kind, _error) = lex_single_syntax_kind(&new_text)?;
if new_token_kind != prev_token_kind
|| (new_token_kind == IDENT && is_contextual_kw(&new_text))
@@ -67,8 +67,8 @@ fn reparse_token<'node>(
// `b` no longer remains an identifier, but becomes a part of byte string literal
if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) {
new_text.push(next_char);
let token_with_next_char = single_token(&new_text);
if token_with_next_char.is_some() {
let token_with_next_char = lex_single_syntax_kind(&new_text);
if let Some((_kind, _error)) = token_with_next_char {
return None;
}
new_text.pop();
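The hazard this check guards against, illustrated with the new helper (an assumption-level sketch, not a test from the commit): appending the next source character can fuse two tokens into one.

// `b` on its own is a single IDENT token...
let (kind, _error) = lex_single_syntax_kind("b").unwrap();
assert_eq!(kind, IDENT);
// ...but `b` followed by `"` lexes as one (unterminated) byte-string token,
// so the cheap single-token reparse would swallow its neighbor; returning
// `None` forces the caller to fall back to a full block reparse.
assert!(lex_single_syntax_kind("b\"").is_some());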
@@ -88,23 +88,26 @@ fn reparse_block<'node>(
) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> {
let (node, reparser) = find_reparsable_node(root, edit.delete)?;
let text = get_text_after_edit(node.clone().into(), &edit);
let ParsedTokens { tokens, errors } = tokenize(&text);
let (tokens, new_lexer_errors) = tokenize(&text);
if !is_balanced(&tokens) {
return None;
}
let mut token_source = TextTokenSource::new(&text, &tokens);
let mut tree_sink = TextTreeSink::new(&text, &tokens, errors);
let mut tree_sink = TextTreeSink::new(&text, &tokens);
reparser.parse(&mut token_source, &mut tree_sink);
let (green, new_errors) = tree_sink.finish();
Some((node.replace_with(green), new_errors, node.text_range()))
let (green, mut new_parser_errors) = tree_sink.finish();
new_parser_errors.extend(new_lexer_errors);
Some((node.replace_with(green), new_parser_errors, node.text_range()))
}
fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String {
let edit =
AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone());
// Note: we could move this match to a method or even further: use enum_dispatch crate
// https://crates.io/crates/enum_dispatch
let text = match element {
NodeOrToken::Token(token) => token.text().to_string(),
NodeOrToken::Node(node) => node.text().to_string(),
@@ -122,8 +125,6 @@ fn is_contextual_kw(text: &str) -> bool {
fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> {
let node = algo::find_covering_element(node, range);
// Note: we could move this match to a method or even further: use enum_dispatch crate
// https://crates.io/crates/enum_dispatch
let mut ancestors = match node {
NodeOrToken::Token(it) => it.parent().ancestors(),
NodeOrToken::Node(it) => it.ancestors(),

View File

@@ -92,14 +92,14 @@ impl<'a> TreeSink for TextTreeSink<'a> {
}
impl<'a> TextTreeSink<'a> {
pub(super) fn new(text: &'a str, tokens: &'a [Token], errors: Vec<SyntaxError>) -> Self {
pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> Self {
Self {
text,
tokens,
text_pos: 0.into(),
token_pos: 0,
state: State::PendingStart,
inner: SyntaxTreeBuilder::new(errors),
inner: SyntaxTreeBuilder::default(),
}
}

View File

@@ -44,12 +44,6 @@ pub struct SyntaxTreeBuilder {
inner: GreenNodeBuilder<'static>,
}
impl SyntaxTreeBuilder {
pub fn new(errors: Vec<SyntaxError>) -> Self {
Self { errors, inner: GreenNodeBuilder::default() }
}
}
impl SyntaxTreeBuilder {
pub(crate) fn finish_raw(self) -> (GreenNode, Vec<SyntaxError>) {
let green = self.inner.finish();

View File

@@ -11,7 +11,7 @@ use crate::{fuzz, SourceFile};
fn lexer_tests() {
dir_tests(&test_data_dir(), &["lexer"], |text, _| {
// FIXME: add tests for errors (their format is up to discussion)
let tokens = crate::tokenize(text).tokens;
let (tokens, _errors) = crate::tokenize(text);
dump_tokens(&tokens, text)
})
}
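Toward the FIXME above, a hypothetical error test that avoids committing to an error format (a sketch, not in this commit):

#[test]
fn tokenization_errors_are_reported() {
    // The exact rendering of errors is still open; their presence is not.
    let (_tokens, errors) = crate::tokenize("\"unterminated");
    assert!(!errors.is_empty());
}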