Merge TokenCursor::{next,next_desugared}.
And likewise for the inlined variants. I did this for simplicity, but interestingly it was a performance win as well.
commit b1e6dee596
parent 89ec75b0e9
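
Note: the substance of the change is an API merge. `TokenCursor::next` and `TokenCursor::next_desugared` (and their always-inlined twins) collapse into a single `next(desugar_doc_comments: bool)`, so callers pass their intent as an argument instead of branching between two methods. A minimal sketch of that pattern with made-up stand-in types, not the compiler's own:

    // Illustrative only; `Cursor` here is not the real parser type.
    struct Cursor {
        tokens: Vec<String>,
        pos: usize,
    }

    impl Cursor {
        // After the merge: one method, with desugaring selected by a flag
        // instead of by a separate `next_desugared` method.
        fn next(&mut self, desugar_doc_comments: bool) -> Option<String> {
            let tok = self.tokens.get(self.pos)?.clone();
            self.pos += 1;
            if desugar_doc_comments && tok.starts_with("///") {
                // Stand-in for the real doc-comment -> `#[doc = ...]` rewrite.
                Some(format!("#[doc = \"{}\"]", tok.trim_start_matches('/').trim()))
            } else {
                Some(tok)
            }
        }
    }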
@@ -100,21 +100,16 @@ struct LazyTokenStreamImpl {
 impl CreateTokenStream for LazyTokenStreamImpl {
     fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
-        // The token produced by the final call to `{,inlined_}next` or
-        // `{,inlined_}next_desugared` was not actually consumed by the
-        // callback. The combination of chaining the initial token and using
-        // `take` produces the desired result - we produce an empty
-        // `TokenStream` if no calls were made, and omit the final token
-        // otherwise.
+        // The token produced by the final call to `{,inlined_}next` was not
+        // actually consumed by the callback. The combination of chaining the
+        // initial token and using `take` produces the desired result - we
+        // produce an empty `TokenStream` if no calls were made, and omit the
+        // final token otherwise.
         let mut cursor_snapshot = self.cursor_snapshot.clone();
         let tokens =
             std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
                 .chain((0..self.num_calls).map(|_| {
-                    let token = if cursor_snapshot.desugar_doc_comments {
-                        cursor_snapshot.next_desugared()
-                    } else {
-                        cursor_snapshot.next()
-                    };
+                    let token = cursor_snapshot.next(cursor_snapshot.desugar_doc_comments);
                     (FlatToken::Token(token.0), token.1)
                 }))
                 .take(self.num_calls);
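The rewritten comment above relies on an iterator trick: chain the initial token in front, then `take(num_calls)`. A miniature with plain integers (a sketch, not the real `FlatToken` stream) showing why this yields an empty stream when no calls were made and otherwise drops the final, never-consumed element:

    fn replay(start: i32, num_calls: usize) -> Vec<i32> {
        let mut next = start;
        std::iter::once(start)
            .chain((0..num_calls).map(|_| {
                // Stand-in for replaying one cursor call.
                next += 1;
                next
            }))
            .take(num_calls) // cuts off the final, unconsumed element
            .collect()
    }

    fn main() {
        assert!(replay(10, 0).is_empty()); // no calls: empty stream
        assert_eq!(replay(10, 3), vec![10, 11, 12]); // final element omitted
    }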
@@ -206,9 +206,7 @@ struct TokenCursor {
     frame: TokenCursorFrame,
     stack: Vec<TokenCursorFrame>,
     desugar_doc_comments: bool,
-    // Counts the number of calls to `{,inlined_}next` or
-    // `{,inlined_}next_desugared`, depending on whether
-    // `desugar_doc_comments` is set.
+    // Counts the number of calls to `{,inlined_}next`.
     num_next_calls: usize,
     // During parsing, we may sometimes need to 'unglue' a
     // glued token into two component tokens
@@ -256,14 +254,14 @@ fn new(span: DelimSpan, delim: DelimToken, tts: TokenStream) -> Self {
 }

 impl TokenCursor {
-    fn next(&mut self) -> (Token, Spacing) {
-        self.inlined_next()
+    fn next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
+        self.inlined_next(desugar_doc_comments)
     }

     /// This always-inlined version should only be used on hot code paths.
     #[inline(always)]
-    fn inlined_next(&mut self) -> (Token, Spacing) {
-        loop {
+    fn inlined_next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
+        let (token, spacing) = loop {
             let (tree, spacing) = if !self.frame.open_delim {
                 self.frame.open_delim = true;
                 TokenTree::token(token::OpenDelim(self.frame.delim), self.frame.span.open).into()
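The `loop` in `inlined_next` now feeds a `let` binding. This works because `loop` is an expression in Rust and `break v` gives it the value `v`, which is what lets the next hunk hoist the doc-comment handling out of the loop rather than returning early from inside it. A trivial illustration of the construct:

    fn main() {
        let mut n = 1u32;
        // `break n` makes the whole loop evaluate to `n`.
        let first_power_of_two_above_100 = loop {
            n *= 2;
            if n > 100 {
                break n;
            }
        };
        assert_eq!(first_power_of_two_above_100, 128);
    }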
@@ -281,77 +279,74 @@ fn inlined_next(&mut self) -> (Token, Spacing) {

             match tree {
                 TokenTree::Token(token) => {
-                    return (token, spacing);
+                    break (token, spacing);
                 }
                 TokenTree::Delimited(sp, delim, tts) => {
                     let frame = TokenCursorFrame::new(sp, delim, tts);
                     self.stack.push(mem::replace(&mut self.frame, frame));
                 }
             }
-        }
-    }
-
-    fn next_desugared(&mut self) -> (Token, Spacing) {
-        self.inlined_next_desugared()
-    }
-
-    /// This always-inlined version should only be used on hot code paths.
-    #[inline(always)]
-    fn inlined_next_desugared(&mut self) -> (Token, Spacing) {
-        let (data, attr_style, sp) = match self.inlined_next() {
-            (Token { kind: token::DocComment(_, attr_style, data), span }, _) => {
-                (data, attr_style, span)
-            }
-            tok => return tok,
         };

-        // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
-        // required to wrap the text.
-        let mut num_of_hashes = 0;
-        let mut count = 0;
-        for ch in data.as_str().chars() {
-            count = match ch {
-                '"' => 1,
-                '#' if count > 0 => count + 1,
-                _ => 0,
-            };
-            num_of_hashes = cmp::max(num_of_hashes, count);
-        }
-
-        let delim_span = DelimSpan::from_single(sp);
-        let body = TokenTree::Delimited(
-            delim_span,
-            token::Bracket,
-            [
-                TokenTree::token(token::Ident(sym::doc, false), sp),
-                TokenTree::token(token::Eq, sp),
-                TokenTree::token(TokenKind::lit(token::StrRaw(num_of_hashes), data, None), sp),
-            ]
-            .iter()
-            .cloned()
-            .collect::<TokenStream>(),
-        );
-
-        self.stack.push(mem::replace(
-            &mut self.frame,
-            TokenCursorFrame::new(
-                delim_span,
-                token::NoDelim,
-                if attr_style == AttrStyle::Inner {
-                    [TokenTree::token(token::Pound, sp), TokenTree::token(token::Not, sp), body]
-                        .iter()
-                        .cloned()
-                        .collect::<TokenStream>()
-                } else {
-                    [TokenTree::token(token::Pound, sp), body]
-                        .iter()
-                        .cloned()
-                        .collect::<TokenStream>()
-                },
-            ),
-        ));
-
-        self.next()
+        match (desugar_doc_comments, &token) {
+            (true, &Token { kind: token::DocComment(_, attr_style, data), span }) => {
+                // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
+                // required to wrap the text.
+                let mut num_of_hashes = 0;
+                let mut count = 0;
+                for ch in data.as_str().chars() {
+                    count = match ch {
+                        '"' => 1,
+                        '#' if count > 0 => count + 1,
+                        _ => 0,
+                    };
+                    num_of_hashes = cmp::max(num_of_hashes, count);
+                }
+
+                let delim_span = DelimSpan::from_single(span);
+                let body = TokenTree::Delimited(
+                    delim_span,
+                    token::Bracket,
+                    [
+                        TokenTree::token(token::Ident(sym::doc, false), span),
+                        TokenTree::token(token::Eq, span),
+                        TokenTree::token(
+                            TokenKind::lit(token::StrRaw(num_of_hashes), data, None),
+                            span,
+                        ),
+                    ]
+                    .iter()
+                    .cloned()
+                    .collect::<TokenStream>(),
+                );
+
+                self.stack.push(mem::replace(
+                    &mut self.frame,
+                    TokenCursorFrame::new(
+                        delim_span,
+                        token::NoDelim,
+                        if attr_style == AttrStyle::Inner {
+                            [
+                                TokenTree::token(token::Pound, span),
+                                TokenTree::token(token::Not, span),
+                                body,
+                            ]
+                            .iter()
+                            .cloned()
+                            .collect::<TokenStream>()
+                        } else {
+                            [TokenTree::token(token::Pound, span), body]
+                                .iter()
+                                .cloned()
+                                .collect::<TokenStream>()
+                        },
+                    ),
+                ));
+
+                self.next(/* desugar_doc_comments */ false)
+            }
+            _ => (token, spacing),
+        }
     }
 }

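The hash-counting loop carried over in this hunk sizes the raw-string wrapper for the desugared doc comment: a raw string `r#"…"#` ends at the first `"` followed by as many `#`s as opened it, so the generated literal must use more `#`s than any quote-then-hashes run inside the text. The same scan, copied into a standalone runnable form:

    use std::cmp;

    // Minimum number of `#`s needed to wrap `text` in a raw string
    // literal (the same scan as in the hunk above).
    fn min_hashes(text: &str) -> usize {
        let mut num_of_hashes = 0;
        let mut count = 0;
        for ch in text.chars() {
            count = match ch {
                '"' => 1,
                '#' if count > 0 => count + 1,
                _ => 0,
            };
            num_of_hashes = cmp::max(num_of_hashes, count);
        }
        num_of_hashes
    }

    fn main() {
        assert_eq!(min_hashes("plain text"), 0); // r"..." suffices
        assert_eq!(min_hashes("a \" quote"), 1); // needs r#"..."#
        assert_eq!(min_hashes("ends in \"#"), 2); // needs r##"..."##
    }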
@@ -1010,11 +1005,7 @@ fn inlined_bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
     pub fn bump(&mut self) {
         let fallback_span = self.token.span;
         loop {
-            let (mut next, spacing) = if self.desugar_doc_comments {
-                self.token_cursor.inlined_next_desugared()
-            } else {
-                self.token_cursor.inlined_next()
-            };
+            let (mut next, spacing) = self.token_cursor.inlined_next(self.desugar_doc_comments);
             self.token_cursor.num_next_calls += 1;
             // We've retrieved an token from the underlying
             // cursor, so we no longer need to worry about
@@ -1063,7 +1054,7 @@ pub fn look_ahead<R>(&self, dist: usize, looker: impl FnOnce(&Token) -> R) -> R
         let mut i = 0;
         let mut token = Token::dummy();
         while i < dist {
-            token = cursor.next().0;
+            token = cursor.next(/* desugar_doc_comments */ false).0;
             if matches!(
                 token.kind,
                 token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim)