diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index aadcfa7fed5..8e80161af1b 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -706,7 +706,7 @@ fn next(&mut self) -> Option<&'t TokenTree> {
 /// involve associated types) for getting individual elements, or
 /// `RefTokenTreeCursor` if you really want an `Iterator`, e.g. in a `for`
 /// loop.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct TokenTreeCursor {
     pub stream: TokenStream,
     index: usize,
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index ec025b44b7b..2f688c60765 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -19,6 +19,7 @@
 pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
 pub use path::PathStyle;
 
+use core::fmt;
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, Delimiter, Token, TokenKind};
 use rustc_ast::tokenstream::{AttributesData, DelimSpacing, DelimSpan, Spacing};
@@ -46,7 +47,7 @@
 };
 
 bitflags::bitflags! {
-    #[derive(Clone, Copy)]
+    #[derive(Clone, Copy, Debug)]
     struct Restrictions: u8 {
         const STMT_EXPR = 1 << 0;
         const NO_STRUCT_LITERAL = 1 << 1;
@@ -72,7 +73,7 @@ enum BlockMode {
 
 /// Whether or not we should force collection of tokens for an AST node,
 /// regardless of whether or not it has attributes
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ForceCollect {
     Yes,
     No,
@@ -120,7 +121,7 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
     };
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub enum Recovery {
     Allowed,
     Forbidden,
@@ -182,7 +183,7 @@ pub struct Parser<'a> {
 rustc_data_structures::static_assert_size!(Parser<'_>, 264);
 
 /// Stores span information about a closure.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct ClosureSpans {
     pub whole_closure: Span,
     pub closing_pipe: Span,
@@ -211,7 +212,7 @@ pub struct ClosureSpans {
 /// Controls how we capture tokens. Capturing can be expensive,
 /// so we try to avoid performing capturing in cases where
 /// we will never need an `AttrTokenStream`.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub enum Capturing {
     /// We aren't performing any capturing - this is the default mode.
     No,
@@ -219,7 +220,7 @@ pub enum Capturing {
     Yes,
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct CaptureState {
     capturing: Capturing,
     replace_ranges: Vec<ReplaceRange>,
@@ -230,7 +231,7 @@ struct CaptureState {
 /// we (a) lex tokens into a nice tree structure (`TokenStream`), and then (b)
 /// use this type to emit them as a linear sequence. But a linear sequence is
 /// what the parser expects, for the most part.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct TokenCursor {
     // Cursor for the current (innermost) token stream. The delimiters for this
     // token stream are found in `self.stack.last()`; when that is `None` then
@@ -335,6 +336,7 @@ enum TokenExpectType {
 }
 
 /// A sequence separator.
+#[derive(Debug)]
 struct SeqSep {
     /// The separator token.
     sep: Option<TokenKind>,
@@ -352,6 +354,7 @@ fn none() -> SeqSep {
     }
 }
 
+#[derive(Debug)]
 pub enum FollowedByType {
     Yes,
     No,
@@ -376,7 +379,7 @@ pub enum Trailing {
     Yes,
 }
 
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum TokenDescription {
     ReservedIdentifier,
     Keyword,
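
A minimal sketch of what the new derives enable (illustrative only, not part of the patch): with `Debug` derived, parser-internal state such as `Restrictions`, `Recovery`, or `Capturing` can be printed with `{:?}` while debugging the parser. The values below are made up for the example.

    // Hypothetical debugging snippet; assumes the types above are in scope.
    let restrictions = Restrictions::STMT_EXPR | Restrictions::NO_STRUCT_LITERAL;
    // bitflags generates a `Debug` impl that prints the names of the set flags.
    eprintln!("restrictions = {restrictions:?}");
    eprintln!("recovery = {:?}", Recovery::Allowed);  // prints `Allowed`
    eprintln!("capturing = {:?}", Capturing::No);     // prints `No`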