Merge TokenCursor::{next,next_desugared}
.
And likewise for the inlined variants. I did this for simplicity, but interestingly it was a performance win as well.
This commit is contained in:
parent
89ec75b0e9
commit
b1e6dee596
@ -100,21 +100,16 @@ struct LazyTokenStreamImpl {
|
||||
|
||||
impl CreateTokenStream for LazyTokenStreamImpl {
|
||||
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
|
||||
// The token produced by the final call to `{,inlined_}next` or
|
||||
// `{,inlined_}next_desugared` was not actually consumed by the
|
||||
// callback. The combination of chaining the initial token and using
|
||||
// `take` produces the desired result - we produce an empty
|
||||
// `TokenStream` if no calls were made, and omit the final token
|
||||
// otherwise.
|
||||
// The token produced by the final call to `{,inlined_}next` was not
|
||||
// actually consumed by the callback. The combination of chaining the
|
||||
// initial token and using `take` produces the desired result - we
|
||||
// produce an empty `TokenStream` if no calls were made, and omit the
|
||||
// final token otherwise.
|
||||
let mut cursor_snapshot = self.cursor_snapshot.clone();
|
||||
let tokens =
|
||||
std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
|
||||
.chain((0..self.num_calls).map(|_| {
|
||||
let token = if cursor_snapshot.desugar_doc_comments {
|
||||
cursor_snapshot.next_desugared()
|
||||
} else {
|
||||
cursor_snapshot.next()
|
||||
};
|
||||
let token = cursor_snapshot.next(cursor_snapshot.desugar_doc_comments);
|
||||
(FlatToken::Token(token.0), token.1)
|
||||
}))
|
||||
.take(self.num_calls);
|
||||
|
@ -206,9 +206,7 @@ struct TokenCursor {
|
||||
frame: TokenCursorFrame,
|
||||
stack: Vec<TokenCursorFrame>,
|
||||
desugar_doc_comments: bool,
|
||||
// Counts the number of calls to `{,inlined_}next` or
|
||||
// `{,inlined_}next_desugared`, depending on whether
|
||||
// `desugar_doc_comments` is set.
|
||||
// Counts the number of calls to `{,inlined_}next`.
|
||||
num_next_calls: usize,
|
||||
// During parsing, we may sometimes need to 'unglue' a
|
||||
// glued token into two component tokens
|
||||
@ -256,14 +254,14 @@ fn new(span: DelimSpan, delim: DelimToken, tts: TokenStream) -> Self {
|
||||
}
|
||||
|
||||
impl TokenCursor {
|
||||
fn next(&mut self) -> (Token, Spacing) {
|
||||
self.inlined_next()
|
||||
fn next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
|
||||
self.inlined_next(desugar_doc_comments)
|
||||
}
|
||||
|
||||
/// This always-inlined version should only be used on hot code paths.
|
||||
#[inline(always)]
|
||||
fn inlined_next(&mut self) -> (Token, Spacing) {
|
||||
loop {
|
||||
fn inlined_next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
|
||||
let (token, spacing) = loop {
|
||||
let (tree, spacing) = if !self.frame.open_delim {
|
||||
self.frame.open_delim = true;
|
||||
TokenTree::token(token::OpenDelim(self.frame.delim), self.frame.span.open).into()
|
||||
@ -281,77 +279,74 @@ fn inlined_next(&mut self) -> (Token, Spacing) {
|
||||
|
||||
match tree {
|
||||
TokenTree::Token(token) => {
|
||||
return (token, spacing);
|
||||
break (token, spacing);
|
||||
}
|
||||
TokenTree::Delimited(sp, delim, tts) => {
|
||||
let frame = TokenCursorFrame::new(sp, delim, tts);
|
||||
self.stack.push(mem::replace(&mut self.frame, frame));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn next_desugared(&mut self) -> (Token, Spacing) {
|
||||
self.inlined_next_desugared()
|
||||
}
|
||||
|
||||
/// This always-inlined version should only be used on hot code paths.
|
||||
#[inline(always)]
|
||||
fn inlined_next_desugared(&mut self) -> (Token, Spacing) {
|
||||
let (data, attr_style, sp) = match self.inlined_next() {
|
||||
(Token { kind: token::DocComment(_, attr_style, data), span }, _) => {
|
||||
(data, attr_style, span)
|
||||
}
|
||||
tok => return tok,
|
||||
};
|
||||
|
||||
// Searches for the occurrences of `"#*` and returns the minimum number of `#`s
|
||||
// required to wrap the text.
|
||||
let mut num_of_hashes = 0;
|
||||
let mut count = 0;
|
||||
for ch in data.as_str().chars() {
|
||||
count = match ch {
|
||||
'"' => 1,
|
||||
'#' if count > 0 => count + 1,
|
||||
_ => 0,
|
||||
};
|
||||
num_of_hashes = cmp::max(num_of_hashes, count);
|
||||
match (desugar_doc_comments, &token) {
|
||||
(true, &Token { kind: token::DocComment(_, attr_style, data), span }) => {
|
||||
// Searches for the occurrences of `"#*` and returns the minimum number of `#`s
|
||||
// required to wrap the text.
|
||||
let mut num_of_hashes = 0;
|
||||
let mut count = 0;
|
||||
for ch in data.as_str().chars() {
|
||||
count = match ch {
|
||||
'"' => 1,
|
||||
'#' if count > 0 => count + 1,
|
||||
_ => 0,
|
||||
};
|
||||
num_of_hashes = cmp::max(num_of_hashes, count);
|
||||
}
|
||||
|
||||
let delim_span = DelimSpan::from_single(span);
|
||||
let body = TokenTree::Delimited(
|
||||
delim_span,
|
||||
token::Bracket,
|
||||
[
|
||||
TokenTree::token(token::Ident(sym::doc, false), span),
|
||||
TokenTree::token(token::Eq, span),
|
||||
TokenTree::token(
|
||||
TokenKind::lit(token::StrRaw(num_of_hashes), data, None),
|
||||
span,
|
||||
),
|
||||
]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>(),
|
||||
);
|
||||
|
||||
self.stack.push(mem::replace(
|
||||
&mut self.frame,
|
||||
TokenCursorFrame::new(
|
||||
delim_span,
|
||||
token::NoDelim,
|
||||
if attr_style == AttrStyle::Inner {
|
||||
[
|
||||
TokenTree::token(token::Pound, span),
|
||||
TokenTree::token(token::Not, span),
|
||||
body,
|
||||
]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>()
|
||||
} else {
|
||||
[TokenTree::token(token::Pound, span), body]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>()
|
||||
},
|
||||
),
|
||||
));
|
||||
|
||||
self.next(/* desugar_doc_comments */ false)
|
||||
}
|
||||
_ => (token, spacing),
|
||||
}
|
||||
|
||||
let delim_span = DelimSpan::from_single(sp);
|
||||
let body = TokenTree::Delimited(
|
||||
delim_span,
|
||||
token::Bracket,
|
||||
[
|
||||
TokenTree::token(token::Ident(sym::doc, false), sp),
|
||||
TokenTree::token(token::Eq, sp),
|
||||
TokenTree::token(TokenKind::lit(token::StrRaw(num_of_hashes), data, None), sp),
|
||||
]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>(),
|
||||
);
|
||||
|
||||
self.stack.push(mem::replace(
|
||||
&mut self.frame,
|
||||
TokenCursorFrame::new(
|
||||
delim_span,
|
||||
token::NoDelim,
|
||||
if attr_style == AttrStyle::Inner {
|
||||
[TokenTree::token(token::Pound, sp), TokenTree::token(token::Not, sp), body]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>()
|
||||
} else {
|
||||
[TokenTree::token(token::Pound, sp), body]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect::<TokenStream>()
|
||||
},
|
||||
),
|
||||
));
|
||||
|
||||
self.next()
|
||||
}
|
||||
}
|
||||
|
||||
@ -1010,11 +1005,7 @@ fn inlined_bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
|
||||
pub fn bump(&mut self) {
|
||||
let fallback_span = self.token.span;
|
||||
loop {
|
||||
let (mut next, spacing) = if self.desugar_doc_comments {
|
||||
self.token_cursor.inlined_next_desugared()
|
||||
} else {
|
||||
self.token_cursor.inlined_next()
|
||||
};
|
||||
let (mut next, spacing) = self.token_cursor.inlined_next(self.desugar_doc_comments);
|
||||
self.token_cursor.num_next_calls += 1;
|
||||
// We've retrieved a token from the underlying
|
||||
// cursor, so we no longer need to worry about
|
||||
@ -1063,7 +1054,7 @@ pub fn look_ahead<R>(&self, dist: usize, looker: impl FnOnce(&Token) -> R) -> R
|
||||
let mut i = 0;
|
||||
let mut token = Token::dummy();
|
||||
while i < dist {
|
||||
token = cursor.next().0;
|
||||
token = cursor.next(/* desugar_doc_comments */ false).0;
|
||||
if matches!(
|
||||
token.kind,
|
||||
token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim)
|
||||
|
Loading…
Reference in New Issue
Block a user