auto merge of #18365 : bjz/rust/token, r=alexcrichton

[breaking-change]

(for syntax extensions)

- Token variant identifiers have been converted to PascalCase for consistency with Rust coding standards
- Some free-functions in `syntax::token` have been converted to methods on `syntax::token::Token`:
    - `can_begin_expr`         -> `Token::can_begin_expr`
    - `close_delimiter_for`    -> `Token::get_close_delimiter`
    - `is_lit`                 -> `Token::is_lit`
    - `is_ident`               -> `Token::is_ident`
    - `is_path`                -> `Token::is_path`
    - `is_plain_ident`         -> `Token::is_plain_ident`
    - `is_lifetime`            -> `Token::is_lifetime`
    - `is_mutability`          -> `Token::is_mutability`
    - `to_binop`               -> `Token::to_binop`
    - `is_keyword`             -> `Token::is_keyword`
    - `is_any_keyword`         -> `Token::is_any_keyword`
    - `is_strict_keyword`      -> `Token::is_strict_keyword`
    - `is_reserved_keyword`    -> `Token::is_reserved_keyword`
    - `mtwt_token_eq`          -> `Token::mtwt_eq`
- `token::Ident` now takes an `IdentStyle` enum (`token::Plain` or `token::ModName`) instead of a boolean for clarity (see the migration sketch after this list)
- `token::{to_string, binop_to_string}` were moved to `pprust::{token_to_string, binop_to_string}`
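
For syntax-extension authors, the migration looks roughly like the sketch below. This is a minimal illustration against the `libsyntax` API as of this commit, not part of the patch; `classify_token` and `describe` are hypothetical helpers.

```rust
use syntax::parse::token;
use syntax::print::pprust;

// Hypothetical helper showing the renamed variants, the new `IdentStyle`
// enum, and free functions that became methods on `Token`.
fn classify_token(tok: &token::Token) -> &'static str {
    match *tok {
        // `IDENT(id, is_mod_name)` is now `Ident(id, style)`; the old
        // boolean maps to `token::Plain` (false) or `token::ModName` (true).
        token::Ident(_, token::Plain) => "plain identifier",
        token::Ident(_, token::ModName) => "identifier followed by `::`",
        // `EOF` is now `Eof`, and likewise for the other variants.
        token::Eof => "end of input",
        // `token::is_any_keyword(&tok)` is now a method:
        _ if tok.is_any_keyword() => "keyword",
        _ => "something else",
    }
}

fn describe(tok: &token::Token) -> String {
    // `token::to_string` moved to `pprust::token_to_string`.
    format!("`{}` is a {}", pprust::token_to_string(tok), classify_token(tok))
}
```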
bors 2014-10-29 10:22:01 +00:00
commit 3bc545373d
29 changed files with 1501 additions and 1421 deletions


@ -55,7 +55,7 @@ extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{IDENT, get_ident};
use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::build::AstBuilder; // trait for expr_uint
@ -71,7 +71,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
("I", 1)];
let text = match args {
[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);


@ -30,12 +30,12 @@ use rustc::driver::{session, config};
use syntax::ast;
use syntax::ast::Name;
use syntax::parse::token::*;
use syntax::parse::token;
use syntax::parse::lexer::TokenAndSpan;
fn parse_token_list(file: &str) -> HashMap<String, Token> {
fn id() -> Token {
IDENT(ast::Ident { name: Name(0), ctxt: 0, }, false)
token::Ident(ast::Ident { name: Name(0), ctxt: 0, }, token::Plain)
}
let mut res = HashMap::new();
@ -52,64 +52,64 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
let num = line.slice_from(eq + 1);
let tok = match val {
"SHR" => BINOP(SHR),
"DOLLAR" => DOLLAR,
"LT" => LT,
"STAR" => BINOP(STAR),
"FLOAT_SUFFIX" => id(),
"INT_SUFFIX" => id(),
"SHL" => BINOP(SHL),
"LBRACE" => LBRACE,
"RARROW" => RARROW,
"LIT_STR" => LIT_STR(Name(0)),
"DOTDOT" => DOTDOT,
"MOD_SEP" => MOD_SEP,
"DOTDOTDOT" => DOTDOTDOT,
"NOT" => NOT,
"AND" => BINOP(AND),
"LPAREN" => LPAREN,
"ANDAND" => ANDAND,
"AT" => AT,
"LBRACKET" => LBRACKET,
"LIT_STR_RAW" => LIT_STR_RAW(Name(0), 0),
"RPAREN" => RPAREN,
"SLASH" => BINOP(SLASH),
"COMMA" => COMMA,
"LIFETIME" => LIFETIME(ast::Ident { name: Name(0), ctxt: 0 }),
"CARET" => BINOP(CARET),
"TILDE" => TILDE,
"IDENT" => id(),
"PLUS" => BINOP(PLUS),
"LIT_CHAR" => LIT_CHAR(Name(0)),
"LIT_BYTE" => LIT_BYTE(Name(0)),
"EQ" => EQ,
"RBRACKET" => RBRACKET,
"COMMENT" => COMMENT,
"DOC_COMMENT" => DOC_COMMENT(Name(0)),
"DOT" => DOT,
"EQEQ" => EQEQ,
"NE" => NE,
"GE" => GE,
"PERCENT" => BINOP(PERCENT),
"RBRACE" => RBRACE,
"BINOP" => BINOP(PLUS),
"POUND" => POUND,
"OROR" => OROR,
"LIT_INTEGER" => LIT_INTEGER(Name(0)),
"BINOPEQ" => BINOPEQ(PLUS),
"LIT_FLOAT" => LIT_FLOAT(Name(0)),
"WHITESPACE" => WS,
"UNDERSCORE" => UNDERSCORE,
"MINUS" => BINOP(MINUS),
"SEMI" => SEMI,
"COLON" => COLON,
"FAT_ARROW" => FAT_ARROW,
"OR" => BINOP(OR),
"GT" => GT,
"LE" => LE,
"LIT_BINARY" => LIT_BINARY(Name(0)),
"LIT_BINARY_RAW" => LIT_BINARY_RAW(Name(0), 0),
_ => continue
"SHR" => token::BinOp(token::Shr),
"DOLLAR" => token::Dollar,
"LT" => token::Lt,
"STAR" => token::BinOp(token::Star),
"FLOAT_SUFFIX" => id(),
"INT_SUFFIX" => id(),
"SHL" => token::BinOp(token::Shl),
"LBRACE" => token::LBrace,
"RARROW" => token::Rarrow,
"LIT_STR" => token::LitStr(Name(0)),
"DOTDOT" => token::DotDot,
"MOD_SEP" => token::ModSep,
"DOTDOTDOT" => token::DotDotDot,
"NOT" => token::Not,
"AND" => token::BinOp(token::And),
"LPAREN" => token::LParen,
"ANDAND" => token::AndAnd,
"AT" => token::At,
"LBRACKET" => token::LBracket,
"LIT_STR_RAW" => token::LitStrRaw(Name(0), 0),
"RPAREN" => token::RParen,
"SLASH" => token::BinOp(token::Slash),
"COMMA" => token::Comma,
"LIFETIME" => token::Lifetime(ast::Ident { name: Name(0), ctxt: 0 }),
"CARET" => token::BinOp(token::Caret),
"TILDE" => token::Tilde,
"IDENT" => token::Id(),
"PLUS" => token::BinOp(token::Plus),
"LIT_CHAR" => token::LitChar(Name(0)),
"LIT_BYTE" => token::LitByte(Name(0)),
"EQ" => token::Eq,
"RBRACKET" => token::RBracket,
"COMMENT" => token::Comment,
"DOC_COMMENT" => token::DocComment(Name(0)),
"DOT" => token::Dot,
"EQEQ" => token::EqEq,
"NE" => token::Ne,
"GE" => token::Ge,
"PERCENT" => token::BinOp(token::Percent),
"RBRACE" => token::RBrace,
"BINOP" => token::BinOp(token::Plus),
"POUND" => token::Pound,
"OROR" => token::OrOr,
"LIT_INTEGER" => token::LitInteger(Name(0)),
"BINOPEQ" => token::BinOpEq(token::Plus),
"LIT_FLOAT" => token::LitFloat(Name(0)),
"WHITESPACE" => token::Whitespace,
"UNDERSCORE" => token::Underscore,
"MINUS" => token::BinOp(token::Minus),
"SEMI" => token::Semi,
"COLON" => token::Colon,
"FAT_ARROW" => token::FatArrow,
"OR" => token::BinOp(token::Or),
"GT" => token::Gt,
"LE" => token::Le,
"LIT_BINARY" => token::LitBinary(Name(0)),
"LIT_BINARY_RAW" => token::LitBinaryRaw(Name(0), 0),
_ => continue,
};
res.insert(num.to_string(), tok);
@ -119,19 +119,19 @@ fn parse_token_list(file: &str) -> HashMap<String, Token> {
res
}
fn str_to_binop(s: &str) -> BinOp {
fn str_to_binop(s: &str) -> BinOpToken {
match s {
"+" => PLUS,
"/" => SLASH,
"-" => MINUS,
"*" => STAR,
"%" => PERCENT,
"^" => CARET,
"&" => AND,
"|" => OR,
"<<" => SHL,
">>" => SHR,
_ => fail!("Bad binop str `{}`", s)
"+" => token::Plus,
"/" => token::Slash,
"-" => token::Minus,
"*" => token::Star,
"%" => token::Percent,
"^" => token::Caret,
"&" => token::And,
"|" => token::Or,
"<<" => token::Shl,
">>" => token::Shr,
_ => fail!("Bad binop str `{}`", s),
}
}
@ -186,19 +186,21 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
debug!("What we got: content (`{}`), proto: {}", content, proto_tok);
let real_tok = match *proto_tok {
BINOP(..) => BINOP(str_to_binop(content)),
BINOPEQ(..) => BINOPEQ(str_to_binop(content.slice_to(content.len() - 1))),
LIT_STR(..) => LIT_STR(fix(content)),
LIT_STR_RAW(..) => LIT_STR_RAW(fix(content), count(content)),
LIT_CHAR(..) => LIT_CHAR(fixchar(content)),
LIT_BYTE(..) => LIT_BYTE(fixchar(content)),
DOC_COMMENT(..) => DOC_COMMENT(nm),
LIT_INTEGER(..) => LIT_INTEGER(nm),
LIT_FLOAT(..) => LIT_FLOAT(nm),
LIT_BINARY(..) => LIT_BINARY(nm),
LIT_BINARY_RAW(..) => LIT_BINARY_RAW(fix(content), count(content)),
IDENT(..) => IDENT(ast::Ident { name: nm, ctxt: 0 }, true),
LIFETIME(..) => LIFETIME(ast::Ident { name: nm, ctxt: 0 }),
token::BinOp(..) => token::BinOp(str_to_binop(content)),
token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to(
content.len() - 1))),
token::LitStr(..) => token::LitStr(fix(content)),
token::LitStrRaw(..) => token::LitStrRaw(fix(content), count(content)),
token::LitChar(..) => token::LitChar(fixchar(content)),
token::LitByte(..) => token::LitByte(fixchar(content)),
token::DocComment(..) => token::DocComment(nm),
token::LitInteger(..) => token::LitInteger(nm),
token::LitFloat(..) => token::LitFloat(nm),
token::LitBinary(..) => token::LitBinary(nm),
token::LitBinaryRaw(..) => token::LitBinaryRaw(fix(content), count(content)),
token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 },
token::ModName),
token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }),
ref t => t.clone()
};
@ -222,8 +224,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
fn tok_cmp(a: &Token, b: &Token) -> bool {
match a {
&IDENT(id, _) => match b {
&IDENT(id2, _) => id == id2,
&token::Ident(id, _) => match b {
&token::Ident(id2, _) => id == id2,
_ => false
},
_ => a == b
@ -281,19 +283,20 @@ fn main() {
)
)
matches!(LIT_BYTE(..),
LIT_CHAR(..),
LIT_INTEGER(..),
LIT_FLOAT(..),
LIT_STR(..),
LIT_STR_RAW(..),
LIT_BINARY(..),
LIT_BINARY_RAW(..),
IDENT(..),
LIFETIME(..),
INTERPOLATED(..),
DOC_COMMENT(..),
SHEBANG(..)
matches!(
LitByte(..),
LitChar(..),
LitInteger(..),
LitFloat(..),
LitStr(..),
LitStrRaw(..),
LitBinary(..),
LitBinaryRaw(..),
Ident(..),
Lifetime(..),
Interpolated(..),
DocComment(..),
Shebang(..)
);
}
}


@ -634,7 +634,7 @@ fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
return None
}
};
if !parser.eat(&token::EOF) {
if !parser.eat(&token::Eof) {
cx.span_err(parser.span, "only one string literal allowed");
return None;
}


@ -428,7 +428,7 @@ impl <'l, 'tcx> DxrVisitor<'l, 'tcx> {
let qualname = format!("{}::{}", qualname, name);
let typ = ppaux::ty_to_string(&self.analysis.ty_cx,
(*self.analysis.ty_cx.node_types.borrow())[field.node.id as uint]);
match self.span.sub_span_before_token(field.span, token::COLON) {
match self.span.sub_span_before_token(field.span, token::Colon) {
Some(sub_span) => self.fmt.field_str(field.span,
Some(sub_span),
field.node.id,
@ -1175,7 +1175,7 @@ impl<'l, 'tcx, 'v> Visitor<'v> for DxrVisitor<'l, 'tcx> {
// 'use' always introduces an alias, if there is not an explicit
// one, there is an implicit one.
let sub_span =
match self.span.sub_span_before_token(path.span, token::EQ) {
match self.span.sub_span_before_token(path.span, token::Eq) {
Some(sub_span) => Some(sub_span),
None => sub_span,
};


@ -19,7 +19,7 @@ use syntax::codemap::*;
use syntax::parse::lexer;
use syntax::parse::lexer::{Reader,StringReader};
use syntax::parse::token;
use syntax::parse::token::{is_keyword,keywords,is_ident,Token};
use syntax::parse::token::{keywords, Token};
pub struct SpanUtils<'a> {
pub sess: &'a Session,
@ -93,18 +93,18 @@ impl<'a> SpanUtils<'a> {
let mut bracket_count = 0u;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return self.make_sub_span(span, result)
}
if bracket_count == 0 &&
(is_ident(&ts.tok) || is_keyword(keywords::Self, &ts.tok)) {
(ts.tok.is_ident() || ts.tok.is_keyword(keywords::Self)) {
result = Some(ts.sp);
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
}
}
@ -116,18 +116,18 @@ impl<'a> SpanUtils<'a> {
let mut bracket_count = 0u;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None;
}
if bracket_count == 0 &&
(is_ident(&ts.tok) || is_keyword(keywords::Self, &ts.tok)) {
(ts.tok.is_ident() || ts.tok.is_keyword(keywords::Self)) {
return self.make_sub_span(span, Some(ts.sp));
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
}
}
@ -141,36 +141,36 @@ impl<'a> SpanUtils<'a> {
let mut result = None;
let mut bracket_count = 0u;
let mut last_span = None;
while prev.tok != token::EOF {
while prev.tok != token::Eof {
last_span = None;
let mut next = toks.next_token();
if (next.tok == token::LPAREN ||
next.tok == token::LT) &&
if (next.tok == token::LParen ||
next.tok == token::Lt) &&
bracket_count == 0 &&
is_ident(&prev.tok) {
prev.tok.is_ident() {
result = Some(prev.sp);
}
if bracket_count == 0 &&
next.tok == token::MOD_SEP {
next.tok == token::ModSep {
let old = prev;
prev = next;
next = toks.next_token();
if next.tok == token::LT &&
is_ident(&old.tok) {
if next.tok == token::Lt &&
old.tok.is_ident() {
result = Some(old.sp);
}
}
bracket_count += match prev.tok {
token::LPAREN | token::LT => 1,
token::RPAREN | token::GT => -1,
token::BINOP(token::SHR) => -2,
token::LParen | token::Lt => 1,
token::RParen | token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
};
if is_ident(&prev.tok) && bracket_count == 0 {
if prev.tok.is_ident() && bracket_count == 0 {
last_span = Some(prev.sp);
}
prev = next;
@ -191,21 +191,21 @@ impl<'a> SpanUtils<'a> {
loop {
let next = toks.next_token();
if (next.tok == token::LT ||
next.tok == token::COLON) &&
if (next.tok == token::Lt ||
next.tok == token::Colon) &&
bracket_count == 0 &&
is_ident(&prev.tok) {
prev.tok.is_ident() {
result = Some(prev.sp);
}
bracket_count += match prev.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
};
if next.tok == token::EOF {
if next.tok == token::Eof {
break;
}
prev = next;
@ -216,7 +216,7 @@ impl<'a> SpanUtils<'a> {
format!("Mis-counted brackets when breaking path? Parsing '{}' in {}, line {}",
self.snippet(span), loc.file.name, loc.line).as_slice());
}
if result.is_none() && is_ident(&prev.tok) && bracket_count == 0 {
if result.is_none() && prev.tok.is_ident() && bracket_count == 0 {
return self.make_sub_span(span, Some(prev.sp));
}
self.make_sub_span(span, result)
@ -235,7 +235,7 @@ impl<'a> SpanUtils<'a> {
let mut bracket_count = 0i;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
if bracket_count != 0 {
let loc = self.sess.codemap().lookup_char_pos(span.lo);
self.sess.span_bug(span, format!(
@ -248,13 +248,13 @@ impl<'a> SpanUtils<'a> {
return result;
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHL) => 2,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shl) => 2,
token::BinOp(token::Shr) => -2,
_ => 0
};
if is_ident(&ts.tok) &&
if ts.tok.is_ident() &&
bracket_count == nesting {
result.push(self.make_sub_span(span, Some(ts.sp)).unwrap());
}
@ -265,7 +265,7 @@ impl<'a> SpanUtils<'a> {
let mut toks = self.retokenise_span(span);
let mut prev = toks.next_token();
loop {
if prev.tok == token::EOF {
if prev.tok == token::Eof {
return None;
}
let next = toks.next_token();
@ -282,12 +282,12 @@ impl<'a> SpanUtils<'a> {
let mut toks = self.retokenise_span(span);
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None;
}
if is_keyword(keyword, &ts.tok) {
if ts.tok.is_keyword(keyword) {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None
} else {
return self.make_sub_span(span, Some(ts.sp));


@ -17,7 +17,7 @@ use html::escape::Escape;
use std::io;
use syntax::parse::lexer;
use syntax::parse::token as t;
use syntax::parse::token;
use syntax::parse;
/// Highlights some source code, returning the HTML output.
@ -63,19 +63,19 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
let snip = |sp| sess.span_diagnostic.cm.span_to_snippet(sp).unwrap();
if next.tok == t::EOF { break }
if next.tok == token::Eof { break }
let klass = match next.tok {
t::WS => {
token::Whitespace => {
try!(write!(out, "{}", Escape(snip(next.sp).as_slice())));
continue
},
t::COMMENT => {
token::Comment => {
try!(write!(out, "<span class='comment'>{}</span>",
Escape(snip(next.sp).as_slice())));
continue
},
t::SHEBANG(s) => {
token::Shebang(s) => {
try!(write!(out, "{}", Escape(s.as_str())));
continue
},
@ -83,24 +83,25 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
// that it's the address-of operator instead of the and-operator.
// This allows us to give all pointers their own class (`Box` and
// `@` are below).
t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
t::AT | t::TILDE => "kw-2",
token::BinOp(token::And) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
token::At | token::Tilde => "kw-2",
// consider this as part of a macro invocation if there was a
// leading identifier
t::NOT if is_macro => { is_macro = false; "macro" }
token::Not if is_macro => { is_macro = false; "macro" }
// operators
t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
t::BINOPEQ(..) | t::FAT_ARROW => "op",
token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt |
token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow |
token::BinOpEq(..) | token::FatArrow => "op",
// miscellaneous, no highlighting
t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
t::COLON | t::MOD_SEP | t::LARROW | t::LPAREN |
t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE | t::QUESTION => "",
t::DOLLAR => {
if t::is_ident(&lexer.peek().tok) {
token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi |
token::Colon | token::ModSep | token::LArrow | token::LParen |
token::RParen | token::LBracket | token::LBrace | token::RBrace |
token::Question => "",
token::Dollar => {
if lexer.peek().tok.is_ident() {
is_macro_nonterminal = true;
"macro-nonterminal"
} else {
@ -112,12 +113,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
// continue highlighting it as an attribute until the ending ']' is
// seen, so skip out early. Down below we terminate the attribute
// span when we see the ']'.
t::POUND => {
token::Pound => {
is_attribute = true;
try!(write!(out, r"<span class='attribute'>#"));
continue
}
t::RBRACKET => {
token::RBracket => {
if is_attribute {
is_attribute = false;
try!(write!(out, "]</span>"));
@ -128,15 +129,15 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
}
// text literals
t::LIT_BYTE(..) | t::LIT_BINARY(..) | t::LIT_BINARY_RAW(..) |
t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",
token::LitByte(..) | token::LitBinary(..) | token::LitBinaryRaw(..) |
token::LitChar(..) | token::LitStr(..) | token::LitStrRaw(..) => "string",
// number literals
t::LIT_INTEGER(..) | t::LIT_FLOAT(..) => "number",
token::LitInteger(..) | token::LitFloat(..) => "number",
// keywords are also included in the identifier set
t::IDENT(ident, _is_mod_sep) => {
match t::get_ident(ident).get() {
token::Ident(ident, _is_mod_sep) => {
match token::get_ident(ident).get() {
"ref" | "mut" => "kw-2",
"self" => "self",
@ -145,12 +146,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
"Option" | "Result" => "prelude-ty",
"Some" | "None" | "Ok" | "Err" => "prelude-val",
_ if t::is_any_keyword(&next.tok) => "kw",
_ if next.tok.is_any_keyword() => "kw",
_ => {
if is_macro_nonterminal {
is_macro_nonterminal = false;
"macro-nonterminal"
} else if lexer.peek().tok == t::NOT {
} else if lexer.peek().tok == token::Not {
is_macro = true;
"macro"
} else {
@ -160,9 +161,9 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
}
}
t::LIFETIME(..) => "lifetime",
t::DOC_COMMENT(..) => "doccomment",
t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
token::Lifetime(..) => "lifetime",
token::DocComment(..) => "doccomment",
token::Underscore | token::Eof | token::Interpolated(..) => "",
};
// as mentioned above, use the original source code instead of


@ -82,7 +82,7 @@ impl PartialEq for Ident {
//
// On the other hand, if the comparison does need to be hygienic,
// one example and its non-hygienic counterpart would be:
// syntax::parse::token::mtwt_token_eq
// syntax::parse::token::Token::mtwt_eq
// syntax::ext::tt::macro_parser::token_name_eq
fail!("not allowed to compare these idents: {}, {}. \
Probably related to issue \\#6993", self, other);


@ -50,7 +50,7 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let code = match token_tree {
[ast::TtToken(_, token::IDENT(code, _))] => code,
[ast::TtToken(_, token::Ident(code, _))] => code,
_ => unreachable!()
};
with_registered_diagnostics(|diagnostics| {
@ -82,12 +82,12 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let (code, description) = match token_tree {
[ast::TtToken(_, token::IDENT(ref code, _))] => {
[ast::TtToken(_, token::Ident(ref code, _))] => {
(code, None)
},
[ast::TtToken(_, token::IDENT(ref code, _)),
ast::TtToken(_, token::COMMA),
ast::TtToken(_, token::LIT_STR_RAW(description, _))] => {
[ast::TtToken(_, token::Ident(ref code, _)),
ast::TtToken(_, token::Comma),
ast::TtToken(_, token::LitStrRaw(description, _))] => {
(code, Some(description))
}
_ => unreachable!()
@ -110,7 +110,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let name = match token_tree {
[ast::TtToken(_, token::IDENT(ref name, _))] => name,
[ast::TtToken(_, token::Ident(ref name, _))] => name,
_ => unreachable!()
};


@ -72,21 +72,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
asm_str_style = Some(style);
}
Outputs => {
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if outputs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (constraint, _str_style) = p.parse_str();
let span = p.last_span;
p.expect(&token::LPAREN);
p.expect(&token::LParen);
let out = p.parse_expr();
p.expect(&token::RPAREN);
p.expect(&token::RParen);
// Expands a read+write operand into two operands.
//
@ -113,12 +113,12 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
}
}
Inputs => {
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if inputs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (constraint, _str_style) = p.parse_str();
@ -129,21 +129,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
cx.span_err(p.last_span, "input operand constraint contains '+'");
}
p.expect(&token::LPAREN);
p.expect(&token::LParen);
let input = p.parse_expr();
p.expect(&token::RPAREN);
p.expect(&token::RParen);
inputs.push((constraint, input));
}
}
Clobbers => {
let mut clobs = Vec::new();
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if clobs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (s, _str_style) = p.parse_str();
@ -172,8 +172,8 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
cx.span_warn(p.last_span, "unrecognized option");
}
if p.token == token::COMMA {
p.eat(&token::COMMA);
if p.token == token::Comma {
p.eat(&token::Comma);
}
}
StateNone => ()
@ -183,17 +183,17 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
// MOD_SEP is a double colon '::' without space in between.
// When encountered, the state must be advanced twice.
match (&p.token, state.next(), state.next().next()) {
(&token::COLON, StateNone, _) |
(&token::MOD_SEP, _, StateNone) => {
(&token::Colon, StateNone, _) |
(&token::ModSep, _, StateNone) => {
p.bump();
break 'statement;
}
(&token::COLON, st, _) |
(&token::MOD_SEP, _, st) => {
(&token::Colon, st, _) |
(&token::ModSep, _, st) => {
p.bump();
state = st;
}
(&token::EOF, _, _) => break 'statement,
(&token::Eof, _, _) => break 'statement,
_ => break
}
}


@ -684,8 +684,8 @@ pub fn get_single_str_from_tts(cx: &ExtCtxt,
cx.span_err(sp, format!("{} takes 1 argument.", name).as_slice());
} else {
match tts[0] {
ast::TtToken(_, token::LIT_STR(ident)) => return Some(parse::str_lit(ident.as_str())),
ast::TtToken(_, token::LIT_STR_RAW(ident, _)) => {
ast::TtToken(_, token::LitStr(ident)) => return Some(parse::str_lit(ident.as_str())),
ast::TtToken(_, token::LitStrRaw(ident, _)) => {
return Some(parse::raw_str_lit(ident.as_str()))
}
_ => {
@ -704,12 +704,12 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
tts: &[ast::TokenTree]) -> Option<Vec<P<ast::Expr>>> {
let mut p = cx.new_parser_from_tts(tts);
let mut es = Vec::new();
while p.token != token::EOF {
while p.token != token::Eof {
es.push(cx.expander().fold_expr(p.parse_expr()));
if p.eat(&token::COMMA) {
if p.eat(&token::Comma) {
continue;
}
if p.token != token::EOF {
if p.token != token::Eof {
cx.span_err(sp, "expected token: `,`");
return None;
}


@ -29,7 +29,7 @@ pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if !p.eat(&token::EOF) {
if !p.eat(&token::Eof) {
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}


@ -23,21 +23,21 @@ pub fn expand_syntax_ext<'cx>(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]
for (i, e) in tts.iter().enumerate() {
if i & 1 == 1 {
match *e {
ast::TtToken(_, token::COMMA) => (),
ast::TtToken(_, token::Comma) => {},
_ => {
cx.span_err(sp, "concat_idents! expecting comma.");
return DummyResult::expr(sp);
}
},
}
} else {
match *e {
ast::TtToken(_, token::IDENT(ident,_)) => {
ast::TtToken(_, token::Ident(ident, _)) => {
res_str.push_str(token::get_ident(ident).get())
}
},
_ => {
cx.span_err(sp, "concat_idents! requires ident args.");
return DummyResult::expr(sp);
}
},
}
}
}


@ -91,7 +91,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
// Parse the leading function expression (maybe a block, maybe a path)
let invocation = if allow_method {
let e = p.parse_expr();
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (Call(e), None);
}
@ -99,28 +99,27 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
} else {
Call(p.parse_expr())
};
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (invocation, None);
}
if p.token == token::EOF {
if p.token == token::Eof {
ecx.span_err(sp, "requires at least a format string argument");
return (invocation, None);
}
let fmtstr = p.parse_expr();
let mut named = false;
while p.token != token::EOF {
if !p.eat(&token::COMMA) {
while p.token != token::Eof {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (invocation, None);
}
if p.token == token::EOF { break } // accept trailing commas
if named || (token::is_ident(&p.token) &&
p.look_ahead(1, |t| *t == token::EQ)) {
if p.token == token::Eof { break } // accept trailing commas
if named || (p.token.is_ident() && p.look_ahead(1, |t| *t == token::Eq)) {
named = true;
let ident = match p.token {
token::IDENT(i, _) => {
token::Ident(i, _) => {
p.bump();
i
}
@ -139,7 +138,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
};
let interned_name = token::get_ident(ident);
let name = interned_name.get();
p.expect(&token::EQ);
p.expect(&token::Eq);
let e = p.parse_expr();
match names.find_equiv(&name) {
None => {}


@ -515,123 +515,127 @@ fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
cx.expr_path(cx.path_global(sp, idents))
}
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> P<ast::Expr> {
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
let name = match bop {
PLUS => "PLUS",
MINUS => "MINUS",
STAR => "STAR",
SLASH => "SLASH",
PERCENT => "PERCENT",
CARET => "CARET",
AND => "AND",
OR => "OR",
SHL => "SHL",
SHR => "SHR"
token::Plus => "Plus",
token::Minus => "Minus",
token::Star => "Star",
token::Slash => "Slash",
token::Percent => "Percent",
token::Caret => "Caret",
token::And => "And",
token::Or => "Or",
token::Shl => "Shl",
token::Shr => "Shr"
};
mk_token_path(cx, sp, name)
}
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
match *tok {
BINOP(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BINOP"), vec!(mk_binop(cx, sp, binop)));
token::BinOp(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
}
BINOPEQ(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BINOPEQ"),
token::BinOpEq(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
vec!(mk_binop(cx, sp, binop)));
}
LIT_BYTE(i) => {
token::LitByte(i) => {
let e_byte = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_BYTE"), vec!(e_byte));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitByte"), vec!(e_byte));
}
LIT_CHAR(i) => {
token::LitChar(i) => {
let e_char = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_CHAR"), vec!(e_char));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitChar"), vec!(e_char));
}
LIT_INTEGER(i) => {
token::LitInteger(i) => {
let e_int = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_INTEGER"), vec!(e_int));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitInteger"), vec!(e_int));
}
LIT_FLOAT(fident) => {
token::LitFloat(fident) => {
let e_fident = mk_name(cx, sp, fident.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_FLOAT"), vec!(e_fident));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitFloat"), vec!(e_fident));
}
LIT_STR(ident) => {
token::LitStr(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIT_STR"),
mk_token_path(cx, sp, "LitStr"),
vec!(mk_name(cx, sp, ident.ident())));
}
LIT_STR_RAW(ident, n) => {
token::LitStrRaw(ident, n) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIT_STR_RAW"),
mk_token_path(cx, sp, "LitStrRaw"),
vec!(mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n)));
}
IDENT(ident, b) => {
token::Ident(ident, style) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "IDENT"),
vec!(mk_ident(cx, sp, ident), cx.expr_bool(sp, b)));
mk_token_path(cx, sp, "Ident"),
vec![mk_ident(cx, sp, ident),
match style {
ModName => mk_token_path(cx, sp, "ModName"),
Plain => mk_token_path(cx, sp, "Plain"),
}]);
}
LIFETIME(ident) => {
token::Lifetime(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIFETIME"),
mk_token_path(cx, sp, "Lifetime"),
vec!(mk_ident(cx, sp, ident)));
}
DOC_COMMENT(ident) => {
token::DocComment(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "DOC_COMMENT"),
mk_token_path(cx, sp, "DocComment"),
vec!(mk_name(cx, sp, ident.ident())));
}
INTERPOLATED(_) => fail!("quote! with interpolated token"),
token::Interpolated(_) => fail!("quote! with interpolated token"),
_ => ()
}
let name = match *tok {
EQ => "EQ",
LT => "LT",
LE => "LE",
EQEQ => "EQEQ",
NE => "NE",
GE => "GE",
GT => "GT",
ANDAND => "ANDAND",
OROR => "OROR",
NOT => "NOT",
TILDE => "TILDE",
AT => "AT",
DOT => "DOT",
DOTDOT => "DOTDOT",
COMMA => "COMMA",
SEMI => "SEMI",
COLON => "COLON",
MOD_SEP => "MOD_SEP",
RARROW => "RARROW",
LARROW => "LARROW",
FAT_ARROW => "FAT_ARROW",
LPAREN => "LPAREN",
RPAREN => "RPAREN",
LBRACKET => "LBRACKET",
RBRACKET => "RBRACKET",
LBRACE => "LBRACE",
RBRACE => "RBRACE",
POUND => "POUND",
DOLLAR => "DOLLAR",
UNDERSCORE => "UNDERSCORE",
EOF => "EOF",
_ => fail!()
token::Eq => "Eq",
token::Lt => "Lt",
token::Le => "Le",
token::EqEq => "EqEq",
token::Ne => "Ne",
token::Ge => "Ge",
token::Gt => "Gt",
token::AndAnd => "AndAnd",
token::OrOr => "OrOr",
token::Not => "Not",
token::Tilde => "Tilde",
token::At => "At",
token::Dot => "Dot",
token::DotDot => "DotDot",
token::Comma => "Comma",
token::Semi => "Semi",
token::Colon => "Colon",
token::ModSep => "ModSep",
token::RArrow => "RArrow",
token::LArrow => "LArrow",
token::FatArrow => "FatArrow",
token::LParen => "LParen",
token::RParen => "RParen",
token::LBracket => "LBracket",
token::RBracket => "RBracket",
token::LBrace => "LBrace",
token::RBrace => "RBrace",
token::Pound => "Pound",
token::Dollar => "Dollar",
token::Underscore => "Underscore",
token::Eof => "Eof",
_ => fail!(),
};
mk_token_path(cx, sp, name)
}
@ -702,7 +706,7 @@ fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
p.quote_depth += 1u;
let cx_expr = p.parse_expr();
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
p.fatal("expected token `,`");
}


@ -12,7 +12,7 @@ use ast;
use codemap::Span;
use ext::base::ExtCtxt;
use ext::base;
use parse::token::{keywords, is_keyword};
use parse::token::keywords;
pub fn expand_trace_macros(cx: &mut ExtCtxt,
@ -20,10 +20,10 @@ pub fn expand_trace_macros(cx: &mut ExtCtxt,
tt: &[ast::TokenTree])
-> Box<base::MacResult+'static> {
match tt {
[ast::TtToken(_, ref tok)] if is_keyword(keywords::True, tok) => {
[ast::TtToken(_, ref tok)] if tok.is_keyword(keywords::True) => {
cx.set_trace_macros(true);
}
[ast::TtToken(_, ref tok)] if is_keyword(keywords::False, tok) => {
[ast::TtToken(_, ref tok)] if tok.is_keyword(keywords::False) => {
cx.set_trace_macros(false);
}
_ => cx.span_err(sp, "trace_macros! accepts only `true` or `false`"),


@ -85,8 +85,9 @@ use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Token, EOF, Nonterminal};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::rc::Rc;
@ -226,8 +227,8 @@ pub fn parse_or_else(sess: &ParseSess,
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::IDENT(id1,_),&token::IDENT(id2,_))
| (&token::LIFETIME(id1),&token::LIFETIME(id2)) =>
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
@ -354,9 +355,9 @@ pub fn parse(sess: &ParseSess,
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::RPAREN |
token::RBRACE |
token::RBRACKET => {},
token::RParen |
token::RBrace |
token::RBracket => {},
_ => bb_eis.push(ei)
}
}
@ -372,7 +373,7 @@ pub fn parse(sess: &ParseSess,
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &EOF) {
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1u {
let mut v = Vec::new();
for dv in eof_eis.get_mut(0).matches.iter_mut() {
@ -402,7 +403,7 @@ pub fn parse(sess: &ParseSess,
nts, next_eis.len()).to_string());
} else if bb_eis.len() == 0u && next_eis.len() == 0u {
return Failure(sp, format!("no rules expected the token `{}`",
token::to_string(&tok)).to_string());
pprust::token_to_string(&tok)).to_string());
} else if next_eis.len() > 0u {
/* Now process the next token */
while next_eis.len() > 0u {
@ -447,9 +448,9 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
"ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::IDENT(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
_ => {
let token_str = token::to_string(&p.token);
let token_str = pprust::token_to_string(&p.token);
p.fatal((format!("expected ident, found {}",
token_str.as_slice())).as_slice())
}


@ -20,7 +20,7 @@ use parse::lexer::new_tt_reader;
use parse::parser::Parser;
use parse::attr::ParserAttr;
use parse::token::{special_idents, gensym_ident};
use parse::token::{FAT_ARROW, SEMI, NtMatchers, NtTT, EOF};
use parse::token::{NtMatchers, NtTT};
use parse::token;
use print;
use ptr::P;
@ -43,10 +43,10 @@ impl<'a> ParserAnyMacro<'a> {
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == SEMI {
if allow_semi && parser.token == token::Semi {
parser.bump()
}
if parser.token != EOF {
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
@ -89,7 +89,7 @@ impl<'a> MacResult for ParserAnyMacro<'a> {
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
EOF => break,
token::Eof => break,
_ => {
let attrs = parser.parse_outer_attributes();
ret.push(parser.parse_method(attrs, ast::Inherited))
@ -231,12 +231,13 @@ pub fn add_new_extension<'cx>(cx: &'cx mut ExtCtxt,
let argument_gram = vec!(
ms(MatchSeq(vec!(
ms(MatchNonterminal(lhs_nm, special_idents::matchers, 0u)),
ms(MatchTok(FAT_ARROW)),
ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))), Some(SEMI),
ast::OneOrMore, 0u, 2u)),
ms(MatchTok(token::FatArrow)),
ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))),
Some(token::Semi), ast::OneOrMore, 0u, 2u)),
//to phase into semicolon-termination instead of
//semicolon-separation
ms(MatchSeq(vec!(ms(MatchTok(SEMI))), None, ast::ZeroOrMore, 2u, 2u)));
ms(MatchSeq(vec!(ms(MatchTok(token::Semi))), None,
ast::ZeroOrMore, 2u, 2u)));
// Parse the macro_rules! invocation (`none` is for no interpolations):


@ -13,7 +13,7 @@ use ast::{TokenTree, TtDelimited, TtToken, TtSequence, TtNonterminal, Ident};
use codemap::{Span, DUMMY_SP};
use diagnostic::SpanHandler;
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use parse::token::{EOF, INTERPOLATED, IDENT, Token, NtIdent};
use parse::token::{Token, NtIdent};
use parse::token;
use parse::lexer::TokenAndSpan;
@ -66,7 +66,7 @@ pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
repeat_idx: Vec::new(),
repeat_len: Vec::new(),
/* dummy values, never read: */
cur_tok: EOF,
cur_tok: token::Eof,
cur_span: DUMMY_SP,
};
tt_next_token(&mut r); /* get cur_tok and cur_span set up */
@ -158,7 +158,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
loop {
let should_pop = match r.stack.last() {
None => {
assert_eq!(ret_val.tok, EOF);
assert_eq!(ret_val.tok, token::Eof);
return ret_val;
}
Some(frame) => {
@ -175,7 +175,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
let prev = r.stack.pop().unwrap();
match r.stack.last_mut() {
None => {
r.cur_tok = EOF;
r.cur_tok = token::Eof;
return ret_val;
}
Some(frame) => {
@ -272,13 +272,13 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
(b) we actually can, since it's a token. */
MatchedNonterminal(NtIdent(box sn, b)) => {
r.cur_span = sp;
r.cur_tok = IDENT(sn,b);
r.cur_tok = token::Ident(sn,b);
return ret_val;
}
MatchedNonterminal(ref other_whole_nt) => {
// FIXME(pcwalton): Bad copy.
r.cur_span = sp;
r.cur_tok = INTERPOLATED((*other_whole_nt).clone());
r.cur_tok = token::Interpolated((*other_whole_nt).clone());
return ret_val;
}
MatchedSeq(..) => {


@ -602,11 +602,11 @@ pub fn noop_fold_tts<T: Folder>(tts: &[TokenTree], fld: &mut T) -> Vec<TokenTree
// apply ident folder if it's an ident, apply other folds to interpolated nodes
pub fn noop_fold_token<T: Folder>(t: token::Token, fld: &mut T) -> token::Token {
match t {
token::IDENT(id, followed_by_colons) => {
token::IDENT(fld.fold_ident(id), followed_by_colons)
token::Ident(id, followed_by_colons) => {
token::Ident(fld.fold_ident(id), followed_by_colons)
}
token::LIFETIME(id) => token::LIFETIME(fld.fold_ident(id)),
token::INTERPOLATED(nt) => token::INTERPOLATED(fld.fold_interpolated(nt)),
token::Lifetime(id) => token::Lifetime(fld.fold_ident(id)),
token::Interpolated(nt) => token::Interpolated(fld.fold_interpolated(nt)),
_ => t
}
}


@ -14,7 +14,6 @@ use codemap::{spanned, Spanned, mk_sp, Span};
use parse::common::*; //resolve bug?
use parse::token;
use parse::parser::Parser;
use parse::token::INTERPOLATED;
use ptr::P;
/// A parser that can parse attributes.
@ -36,10 +35,10 @@ impl<'a> ParserAttr for Parser<'a> {
debug!("parse_outer_attributes: self.token={}",
self.token);
match self.token {
token::POUND => {
token::Pound => {
attrs.push(self.parse_attribute(false));
}
token::DOC_COMMENT(s) => {
token::DocComment(s) => {
let attr = ::attr::mk_sugared_doc_attr(
attr::mk_attr_id(),
self.id_to_interned_str(s.ident()),
@ -66,11 +65,11 @@ impl<'a> ParserAttr for Parser<'a> {
debug!("parse_attributes: permit_inner={} self.token={}",
permit_inner, self.token);
let (span, value, mut style) = match self.token {
token::POUND => {
token::Pound => {
let lo = self.span.lo;
self.bump();
let style = if self.eat(&token::NOT) {
let style = if self.eat(&token::Not) {
if !permit_inner {
let span = self.span;
self.span_err(span,
@ -82,10 +81,10 @@ impl<'a> ParserAttr for Parser<'a> {
ast::AttrOuter
};
self.expect(&token::LBRACKET);
self.expect(&token::LBracket);
let meta_item = self.parse_meta_item();
let hi = self.span.hi;
self.expect(&token::RBRACKET);
self.expect(&token::RBracket);
(mk_sp(lo, hi), meta_item, style)
}
@ -96,7 +95,7 @@ impl<'a> ParserAttr for Parser<'a> {
}
};
if permit_inner && self.eat(&token::SEMI) {
if permit_inner && self.eat(&token::Semi) {
self.span_warn(span, "this inner attribute syntax is deprecated. \
The new syntax is `#![foo]`, with a bang and no semicolon.");
style = ast::AttrInner;
@ -130,10 +129,10 @@ impl<'a> ParserAttr for Parser<'a> {
let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new();
loop {
let attr = match self.token {
token::POUND => {
token::Pound => {
self.parse_attribute(true)
}
token::DOC_COMMENT(s) => {
token::DocComment(s) => {
// we need to get the position of this token before we bump.
let Span { lo, hi, .. } = self.span;
self.bump();
@ -161,7 +160,7 @@ impl<'a> ParserAttr for Parser<'a> {
/// | IDENT meta_seq
fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
let nt_meta = match self.token {
token::INTERPOLATED(token::NtMeta(ref e)) => {
token::Interpolated(token::NtMeta(ref e)) => {
Some(e.clone())
}
_ => None
@ -179,7 +178,7 @@ impl<'a> ParserAttr for Parser<'a> {
let ident = self.parse_ident();
let name = self.id_to_interned_str(ident);
match self.token {
token::EQ => {
token::Eq => {
self.bump();
let lit = self.parse_lit();
// FIXME #623 Non-string meta items are not serialized correctly;
@ -195,7 +194,7 @@ impl<'a> ParserAttr for Parser<'a> {
let hi = self.span.hi;
P(spanned(lo, hi, ast::MetaNameValue(name, lit)))
}
token::LPAREN => {
token::LParen => {
let inner_items = self.parse_meta_seq();
let hi = self.span.hi;
P(spanned(lo, hi, ast::MetaList(name, inner_items)))
@ -209,15 +208,15 @@ impl<'a> ParserAttr for Parser<'a> {
/// matches meta_seq = ( COMMASEP(meta_item) )
fn parse_meta_seq(&mut self) -> Vec<P<ast::MetaItem>> {
self.parse_seq(&token::LPAREN,
&token::RPAREN,
seq_sep_trailing_disallowed(token::COMMA),
self.parse_seq(&token::LParen,
&token::RParen,
seq_sep_trailing_disallowed(token::Comma),
|p| p.parse_meta_item()).node
}
fn parse_optional_meta(&mut self) -> Vec<P<ast::MetaItem>> {
match self.token {
token::LPAREN => self.parse_meta_seq(),
token::LParen => self.parse_meta_seq(),
_ => Vec::new()
}
}


@ -15,7 +15,7 @@ use parse::lexer::{is_whitespace, Reader};
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::is_block_doc_comment;
use parse::lexer;
use parse::token;
use print::pprust;
use std::io;
use std::str;
@ -367,13 +367,13 @@ pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();
if token::is_lit(&tok) {
if tok.is_lit() {
rdr.with_str_from(bstart, |s| {
debug!("tok lit: {}", s);
literals.push(Literal {lit: s.to_string(), pos: sp.lo});
})
} else {
debug!("tok: {}", token::to_string(&tok));
debug!("tok: {}", pprust::token_to_string(&tok));
}
first_read = false;
}


@ -69,7 +69,7 @@ impl<'a> Reader for StringReader<'a> {
/// Return the next token. EFFECT: advances the string_reader.
fn next_token(&mut self) -> TokenAndSpan {
let ret_val = TokenAndSpan {
tok: replace(&mut self.peek_tok, token::UNDERSCORE),
tok: replace(&mut self.peek_tok, token::Underscore),
sp: self.peek_span,
};
self.advance_token();
@ -92,7 +92,7 @@ impl<'a> Reader for StringReader<'a> {
impl<'a> Reader for TtReader<'a> {
fn is_eof(&self) -> bool {
self.cur_tok == token::EOF
self.cur_tok == token::Eof
}
fn next_token(&mut self) -> TokenAndSpan {
let r = tt_next_token(self);
@ -136,7 +136,7 @@ impl<'a> StringReader<'a> {
curr: Some('\n'),
filemap: filemap,
/* dummy values; not read */
peek_tok: token::EOF,
peek_tok: token::Eof,
peek_span: codemap::DUMMY_SP,
read_embedded_ident: false,
};
@ -213,7 +213,7 @@ impl<'a> StringReader<'a> {
},
None => {
if self.is_eof() {
self.peek_tok = token::EOF;
self.peek_tok = token::Eof;
} else {
let start_bytepos = self.last_pos;
self.peek_tok = self.next_token_inner();
@ -396,9 +396,9 @@ impl<'a> StringReader<'a> {
return self.with_str_from(start_bpos, |string| {
// but comments with only more "/"s are not
let tok = if is_doc_comment(string) {
token::DOC_COMMENT(token::intern(string))
token::DocComment(token::intern(string))
} else {
token::COMMENT
token::Comment
};
return Some(TokenAndSpan{
@ -410,7 +410,7 @@ impl<'a> StringReader<'a> {
let start_bpos = self.last_pos - BytePos(2);
while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
return Some(TokenAndSpan {
tok: token::COMMENT,
tok: token::Comment,
sp: codemap::mk_sp(start_bpos, self.last_pos)
});
}
@ -440,7 +440,7 @@ impl<'a> StringReader<'a> {
let start = self.last_pos;
while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
return Some(TokenAndSpan {
tok: token::SHEBANG(self.name_from(start)),
tok: token::Shebang(self.name_from(start)),
sp: codemap::mk_sp(start, self.last_pos)
});
}
@ -466,7 +466,7 @@ impl<'a> StringReader<'a> {
let start_bpos = self.last_pos;
while is_whitespace(self.curr) { self.bump(); }
let c = Some(TokenAndSpan {
tok: token::WS,
tok: token::Whitespace,
sp: codemap::mk_sp(start_bpos, self.last_pos)
});
debug!("scanning whitespace: {}", c);
@ -519,9 +519,9 @@ impl<'a> StringReader<'a> {
self.translate_crlf(start_bpos, string,
"bare CR not allowed in block doc-comment")
} else { string.into_maybe_owned() };
token::DOC_COMMENT(token::intern(string.as_slice()))
token::DocComment(token::intern(string.as_slice()))
} else {
token::COMMENT
token::Comment
};
Some(TokenAndSpan{
@ -642,17 +642,17 @@ impl<'a> StringReader<'a> {
}
'u' | 'i' => {
self.scan_int_suffix();
return token::LIT_INTEGER(self.name_from(start_bpos));
return token::LitInteger(self.name_from(start_bpos));
},
'f' => {
let last_pos = self.last_pos;
self.scan_float_suffix();
self.check_float_base(start_bpos, last_pos, base);
return token::LIT_FLOAT(self.name_from(start_bpos));
return token::LitFloat(self.name_from(start_bpos));
}
_ => {
// just a 0
return token::LIT_INTEGER(self.name_from(start_bpos));
return token::LitInteger(self.name_from(start_bpos));
}
}
} else if c.is_digit_radix(10) {
@ -665,7 +665,7 @@ impl<'a> StringReader<'a> {
self.err_span_(start_bpos, self.last_pos, "no valid digits found for number");
// eat any suffix
self.scan_int_suffix();
return token::LIT_INTEGER(token::intern("0"));
return token::LitInteger(token::intern("0"));
}
// might be a float, but don't be greedy if this is actually an
@ -683,13 +683,13 @@ impl<'a> StringReader<'a> {
}
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
return token::LIT_FLOAT(self.name_from(start_bpos));
return token::LitFloat(self.name_from(start_bpos));
} else if self.curr_is('f') {
// or it might be an integer literal suffixed as a float
self.scan_float_suffix();
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
return token::LIT_FLOAT(self.name_from(start_bpos));
return token::LitFloat(self.name_from(start_bpos));
} else {
// it might be a float if it has an exponent
if self.curr_is('e') || self.curr_is('E') {
@ -697,11 +697,11 @@ impl<'a> StringReader<'a> {
self.scan_float_suffix();
let last_pos = self.last_pos;
self.check_float_base(start_bpos, last_pos, base);
return token::LIT_FLOAT(self.name_from(start_bpos));
return token::LitFloat(self.name_from(start_bpos));
}
// but we certainly have an integer!
self.scan_int_suffix();
return token::LIT_INTEGER(self.name_from(start_bpos));
return token::LitInteger(self.name_from(start_bpos));
}
}
@ -889,13 +889,13 @@ impl<'a> StringReader<'a> {
}
}
fn binop(&mut self, op: token::BinOp) -> token::Token {
fn binop(&mut self, op: token::BinOpToken) -> token::Token {
self.bump();
if self.curr_is('=') {
self.bump();
return token::BINOPEQ(op);
return token::BinOpEq(op);
} else {
return token::BINOP(op);
return token::BinOp(op);
}
}
@ -919,14 +919,16 @@ impl<'a> StringReader<'a> {
return self.with_str_from(start, |string| {
if string == "_" {
token::UNDERSCORE
token::Underscore
} else {
let is_mod_name = self.curr_is(':') && self.nextch_is(':');
// FIXME: perform NFKC normalization here. (Issue #2253)
token::IDENT(str_to_ident(string), is_mod_name)
if self.curr_is(':') && self.nextch_is(':') {
token::Ident(str_to_ident(string), token::ModName)
} else {
token::Ident(str_to_ident(string), token::Plain)
}
}
})
});
}
if is_dec_digit(c) {
@ -937,8 +939,11 @@ impl<'a> StringReader<'a> {
match (c.unwrap(), self.nextch(), self.nextnextch()) {
('\x00', Some('n'), Some('a')) => {
let ast_ident = self.scan_embedded_hygienic_ident();
let is_mod_name = self.curr_is(':') && self.nextch_is(':');
return token::IDENT(ast_ident, is_mod_name);
return if self.curr_is(':') && self.nextch_is(':') {
token::Ident(ast_ident, token::ModName)
} else {
token::Ident(ast_ident, token::Plain)
};
}
_ => {}
}
@ -946,84 +951,84 @@ impl<'a> StringReader<'a> {
match c.expect("next_token_inner called at EOF") {
// One-byte tokens.
';' => { self.bump(); return token::SEMI; }
',' => { self.bump(); return token::COMMA; }
';' => { self.bump(); return token::Semi; }
',' => { self.bump(); return token::Comma; }
'.' => {
self.bump();
return if self.curr_is('.') {
self.bump();
if self.curr_is('.') {
self.bump();
token::DOTDOTDOT
token::DotDotDot
} else {
token::DOTDOT
token::DotDot
}
} else {
token::DOT
token::Dot
};
}
'(' => { self.bump(); return token::LPAREN; }
')' => { self.bump(); return token::RPAREN; }
'{' => { self.bump(); return token::LBRACE; }
'}' => { self.bump(); return token::RBRACE; }
'[' => { self.bump(); return token::LBRACKET; }
']' => { self.bump(); return token::RBRACKET; }
'@' => { self.bump(); return token::AT; }
'#' => { self.bump(); return token::POUND; }
'~' => { self.bump(); return token::TILDE; }
'?' => { self.bump(); return token::QUESTION; }
'(' => { self.bump(); return token::LParen; }
')' => { self.bump(); return token::RParen; }
'{' => { self.bump(); return token::LBrace; }
'}' => { self.bump(); return token::RBrace; }
'[' => { self.bump(); return token::LBracket; }
']' => { self.bump(); return token::RBracket; }
'@' => { self.bump(); return token::At; }
'#' => { self.bump(); return token::Pound; }
'~' => { self.bump(); return token::Tilde; }
'?' => { self.bump(); return token::Question; }
':' => {
self.bump();
if self.curr_is(':') {
self.bump();
return token::MOD_SEP;
return token::ModSep;
} else {
return token::COLON;
return token::Colon;
}
}
'$' => { self.bump(); return token::DOLLAR; }
'$' => { self.bump(); return token::Dollar; }
// Multi-byte tokens.
'=' => {
self.bump();
if self.curr_is('=') {
self.bump();
return token::EQEQ;
return token::EqEq;
} else if self.curr_is('>') {
self.bump();
return token::FAT_ARROW;
return token::FatArrow;
} else {
return token::EQ;
return token::Eq;
}
}
'!' => {
self.bump();
if self.curr_is('=') {
self.bump();
return token::NE;
} else { return token::NOT; }
return token::Ne;
} else { return token::Not; }
}
'<' => {
self.bump();
match self.curr.unwrap_or('\x00') {
'=' => { self.bump(); return token::LE; }
'<' => { return self.binop(token::SHL); }
'=' => { self.bump(); return token::Le; }
'<' => { return self.binop(token::Shl); }
'-' => {
self.bump();
match self.curr.unwrap_or('\x00') {
_ => { return token::LARROW; }
_ => { return token::LArrow; }
}
}
_ => { return token::LT; }
_ => { return token::Lt; }
}
}
'>' => {
self.bump();
match self.curr.unwrap_or('\x00') {
'=' => { self.bump(); return token::GE; }
'>' => { return self.binop(token::SHR); }
_ => { return token::GT; }
'=' => { self.bump(); return token::Ge; }
'>' => { return self.binop(token::Shr); }
_ => { return token::Gt; }
}
}
'\'' => {
@ -1056,22 +1061,21 @@ impl<'a> StringReader<'a> {
str_to_ident(lifetime_name)
});
let keyword_checking_token =
&token::IDENT(keyword_checking_ident, false);
&token::Ident(keyword_checking_ident, token::Plain);
let last_bpos = self.last_pos;
if token::is_keyword(token::keywords::Self,
keyword_checking_token) {
if keyword_checking_token.is_keyword(token::keywords::Self) {
self.err_span_(start,
last_bpos,
"invalid lifetime name: 'self \
is no longer a special lifetime");
} else if token::is_any_keyword(keyword_checking_token) &&
!token::is_keyword(token::keywords::Static,
keyword_checking_token) {
} else if keyword_checking_token.is_any_keyword() &&
!keyword_checking_token.is_keyword(token::keywords::Static)
{
self.err_span_(start,
last_bpos,
"invalid lifetime name");
}
return token::LIFETIME(ident);
return token::Lifetime(ident);
}
// Otherwise it is a character constant:
@ -1087,7 +1091,7 @@ impl<'a> StringReader<'a> {
}
let id = if valid { self.name_from(start) } else { token::intern("0") };
self.bump(); // advance curr past token
return token::LIT_CHAR(id);
return token::LitChar(id);
}
'b' => {
self.bump();
@ -1095,7 +1099,7 @@ impl<'a> StringReader<'a> {
Some('\'') => self.scan_byte(),
Some('"') => self.scan_byte_string(),
Some('r') => self.scan_raw_byte_string(),
_ => unreachable!() // Should have been a token::IDENT above.
_ => unreachable!() // Should have been a token::Ident above.
};
}
@ -1118,7 +1122,7 @@ impl<'a> StringReader<'a> {
let id = if valid { self.name_from(start_bpos + BytePos(1)) }
else { token::intern("??") };
self.bump();
return token::LIT_STR(id);
return token::LitStr(id);
}
'r' => {
let start_bpos = self.last_pos;
@ -1185,33 +1189,33 @@ impl<'a> StringReader<'a> {
} else {
token::intern("??")
};
return token::LIT_STR_RAW(id, hash_count);
return token::LitStrRaw(id, hash_count);
}
'-' => {
if self.nextch_is('>') {
self.bump();
self.bump();
return token::RARROW;
} else { return self.binop(token::MINUS); }
return token::RArrow;
} else { return self.binop(token::Minus); }
}
'&' => {
if self.nextch_is('&') {
self.bump();
self.bump();
return token::ANDAND;
} else { return self.binop(token::AND); }
return token::AndAnd;
} else { return self.binop(token::And); }
}
'|' => {
match self.nextch() {
Some('|') => { self.bump(); self.bump(); return token::OROR; }
_ => { return self.binop(token::OR); }
Some('|') => { self.bump(); self.bump(); return token::OrOr; }
_ => { return self.binop(token::Or); }
}
}
'+' => { return self.binop(token::PLUS); }
'*' => { return self.binop(token::STAR); }
'/' => { return self.binop(token::SLASH); }
'^' => { return self.binop(token::CARET); }
'%' => { return self.binop(token::PERCENT); }
'+' => { return self.binop(token::Plus); }
'*' => { return self.binop(token::Star); }
'/' => { return self.binop(token::Slash); }
'^' => { return self.binop(token::Caret); }
'%' => { return self.binop(token::Percent); }
c => {
let last_bpos = self.last_pos;
let bpos = self.pos;
@ -1275,7 +1279,7 @@ impl<'a> StringReader<'a> {
let id = if valid { self.name_from(start) } else { token::intern("??") };
self.bump(); // advance curr past token
return token::LIT_BYTE(id);
return token::LitByte(id);
}
fn scan_byte_string(&mut self) -> token::Token {
@ -1297,7 +1301,7 @@ impl<'a> StringReader<'a> {
}
let id = if valid { self.name_from(start) } else { token::intern("??") };
self.bump();
return token::LIT_BINARY(id);
return token::LitBinary(id);
}
fn scan_raw_byte_string(&mut self) -> token::Token {
@ -1348,7 +1352,7 @@ impl<'a> StringReader<'a> {
self.bump();
}
self.bump();
return token::LIT_BINARY_RAW(self.name_from_to(content_start_bpos, content_end_bpos),
return token::LitBinaryRaw(self.name_from_to(content_start_bpos, content_end_bpos),
hash_count);
}
}
@ -1431,20 +1435,20 @@ mod test {
"/* my source file */ \
fn main() { println!(\"zebra\"); }\n".to_string());
let id = str_to_ident("fn");
assert_eq!(string_reader.next_token().tok, token::COMMENT);
assert_eq!(string_reader.next_token().tok, token::WS);
assert_eq!(string_reader.next_token().tok, token::Comment);
assert_eq!(string_reader.next_token().tok, token::Whitespace);
let tok1 = string_reader.next_token();
let tok2 = TokenAndSpan{
tok:token::IDENT(id, false),
tok:token::Ident(id, token::Plain),
sp:Span {lo:BytePos(21),hi:BytePos(23),expn_id: NO_EXPANSION}};
assert_eq!(tok1,tok2);
assert_eq!(string_reader.next_token().tok, token::WS);
assert_eq!(string_reader.next_token().tok, token::Whitespace);
// the 'main' id is already read:
assert_eq!(string_reader.last_pos.clone(), BytePos(28));
// read another token:
let tok3 = string_reader.next_token();
let tok4 = TokenAndSpan{
tok:token::IDENT(str_to_ident("main"), false),
tok:token::Ident(str_to_ident("main"), token::Plain),
sp:Span {lo:BytePos(24),hi:BytePos(28),expn_id: NO_EXPANSION}};
assert_eq!(tok3,tok4);
// the lparen is already read:
@ -1459,66 +1463,72 @@ mod test {
}
}
// make the identifier by looking up the string in the interner
#[cfg(stage0)]
fn mk_ident (id: &str, is_mod_name: bool) -> token::Token {
token::IDENT (str_to_ident(id),is_mod_name)
token::Ident(str_to_ident(id), is_mod_name)
}
// make the identifier by looking up the string in the interner
#[cfg(not(stage0))]
fn mk_ident(id: &str, style: token::IdentStyle) -> token::Token {
token::Ident(str_to_ident(id), style)
}
#[test] fn doublecolonparsing () {
check_tokenization(setup(&mk_sh(), "a b".to_string()),
vec!(mk_ident("a",false),
token::WS,
mk_ident("b",false)));
vec![mk_ident("a", token::Plain),
token::Whitespace,
mk_ident("b", token::Plain)]);
}
#[test] fn dcparsing_2 () {
check_tokenization(setup(&mk_sh(), "a::b".to_string()),
vec!(mk_ident("a",true),
token::MOD_SEP,
mk_ident("b",false)));
vec![mk_ident("a",token::ModName),
token::ModSep,
mk_ident("b", token::Plain)]);
}
#[test] fn dcparsing_3 () {
check_tokenization(setup(&mk_sh(), "a ::b".to_string()),
vec!(mk_ident("a",false),
token::WS,
token::MOD_SEP,
mk_ident("b",false)));
vec![mk_ident("a", token::Plain),
token::Whitespace,
token::ModSep,
mk_ident("b", token::Plain)]);
}
#[test] fn dcparsing_4 () {
check_tokenization(setup(&mk_sh(), "a:: b".to_string()),
vec!(mk_ident("a",true),
token::MOD_SEP,
token::WS,
mk_ident("b",false)));
vec![mk_ident("a",token::ModName),
token::ModSep,
token::Whitespace,
mk_ident("b", token::Plain)]);
}
#[test] fn character_a() {
assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok,
token::LIT_CHAR(token::intern("a")));
token::LitChar(token::intern("a")));
}
#[test] fn character_space() {
assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok,
token::LIT_CHAR(token::intern(" ")));
token::LitChar(token::intern(" ")));
}
#[test] fn character_escaped() {
assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok,
token::LIT_CHAR(token::intern("\\n")));
token::LitChar(token::intern("\\n")));
}
#[test] fn lifetime_name() {
assert_eq!(setup(&mk_sh(), "'abc".to_string()).next_token().tok,
token::LIFETIME(token::str_to_ident("'abc")));
token::Lifetime(token::str_to_ident("'abc")));
}
#[test] fn raw_string() {
assert_eq!(setup(&mk_sh(),
"r###\"\"#a\\b\x00c\"\"###".to_string()).next_token()
.tok,
token::LIT_STR_RAW(token::intern("\"#a\\b\x00c\""), 3));
token::LitStrRaw(token::intern("\"#a\\b\x00c\""), 3));
}
#[test] fn line_doc_comments() {
@ -1531,10 +1541,10 @@ mod test {
let sh = mk_sh();
let mut lexer = setup(&sh, "/* /* */ */'a'".to_string());
match lexer.next_token().tok {
token::COMMENT => { },
token::Comment => { },
_ => fail!("expected a comment!")
}
assert_eq!(lexer.next_token().tok, token::LIT_CHAR(token::intern("a")));
assert_eq!(lexer.next_token().tok, token::LitChar(token::intern("a")));
}
}


@ -793,34 +793,34 @@ mod test {
let tts = string_to_tts("macro_rules! zip (($a)=>($a))".to_string());
let tts: &[ast::TokenTree] = tts.as_slice();
match tts {
[ast::TtToken(_, token::IDENT(name_macro_rules, false)),
ast::TtToken(_, token::NOT),
ast::TtToken(_, token::IDENT(name_zip, false)),
[ast::TtToken(_, token::Ident(name_macro_rules, token::Plain)),
ast::TtToken(_, token::Not),
ast::TtToken(_, token::Ident(name_zip, token::Plain)),
ast::TtDelimited(_, ref macro_delimed)]
if name_macro_rules.as_str() == "macro_rules"
&& name_zip.as_str() == "zip" => {
let (ref macro_open, ref macro_tts, ref macro_close) = **macro_delimed;
match (macro_open, macro_tts.as_slice(), macro_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtDelimited(_, ref first_delimed),
ast::TtToken(_, token::FAT_ARROW),
ast::TtToken(_, token::FatArrow),
ast::TtDelimited(_, ref second_delimed)],
&ast::Delimiter { token: token::RPAREN, .. }) => {
&ast::Delimiter { token: token::RParen, .. }) => {
let (ref first_open, ref first_tts, ref first_close) = **first_delimed;
match (first_open, first_tts.as_slice(), first_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
[ast::TtToken(_, token::DOLLAR),
ast::TtToken(_, token::IDENT(name, false))],
&ast::Delimiter { token: token::RPAREN, .. })
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtToken(_, token::Dollar),
ast::TtToken(_, token::Ident(name, token::Plain))],
&ast::Delimiter { token: token::RParen, .. })
if name.as_str() == "a" => {},
_ => fail!("value 3: {}", **first_delimed),
}
let (ref second_open, ref second_tts, ref second_close) = **second_delimed;
match (second_open, second_tts.as_slice(), second_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
[ast::TtToken(_, token::DOLLAR),
ast::TtToken(_, token::IDENT(name, false))],
&ast::Delimiter { token: token::RPAREN, .. })
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtToken(_, token::Dollar),
ast::TtToken(_, token::Ident(name, token::Plain))],
&ast::Delimiter { token: token::RParen, .. })
if name.as_str() == "a" => {},
_ => fail!("value 4: {}", **second_delimed),
}
@ -842,10 +842,10 @@ mod test {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"fn\",\
false\
\"Plain\"\
]\
}\
]\
@ -855,10 +855,10 @@ mod test {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"a\",\
false\
\"Plain\"\
]\
}\
]\
@ -870,7 +870,7 @@ mod test {
[\
{\
\"span\":null,\
\"token\":\"LPAREN\"\
\"token\":\"LParen\"\
},\
[\
{\
@ -878,10 +878,10 @@ mod test {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"b\",\
false\
\"Plain\"\
]\
}\
]\
@ -890,7 +890,7 @@ mod test {
\"variant\":\"TtToken\",\
\"fields\":[\
null,\
\"COLON\"\
\"Colon\"\
]\
},\
{\
@ -898,10 +898,10 @@ mod test {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"int\",\
false\
\"Plain\"\
]\
}\
]\
@ -909,7 +909,7 @@ mod test {
],\
{\
\"span\":null,\
\"token\":\"RPAREN\"\
\"token\":\"RParen\"\
}\
]\
]\
@ -921,7 +921,7 @@ mod test {
[\
{\
\"span\":null,\
\"token\":\"LBRACE\"\
\"token\":\"LBrace\"\
},\
[\
{\
@ -929,10 +929,10 @@ mod test {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"b\",\
false\
\"Plain\"\
]\
}\
]\
@ -941,13 +941,13 @@ mod test {
\"variant\":\"TtToken\",\
\"fields\":[\
null,\
\"SEMI\"\
\"Semi\"\
]\
}\
],\
{\
\"span\":null,\
\"token\":\"RBRACE\"\
\"token\":\"RBrace\"\
}\
]\
]\
@ -1002,7 +1002,7 @@ mod test {
}
fn parser_done(p: Parser){
assert_eq!(p.token.clone(), token::EOF);
assert_eq!(p.token.clone(), token::Eof);
}
#[test] fn parse_ident_pat () {


@ -118,7 +118,7 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> {
fn is_obsolete_ident(&mut self, ident: &str) -> bool {
match self.token {
token::IDENT(sid, _) => {
token::Ident(sid, _) => {
token::get_ident(sid).equiv(&ident)
}
_ => false

File diff suppressed because it is too large


@ -9,9 +9,7 @@
// except according to those terms.
use ast;
use ast::{Ident, Name, Mrk};
use ext::mtwt;
use parse::token;
use ptr::P;
use util::interner::{RcStr, StrInterner};
use util::interner;
@ -22,94 +20,377 @@ use std::mem;
use std::path::BytesContainer;
use std::rc::Rc;
// NOTE(stage0): remove these re-exports after the next snapshot
// (needed to allow quotations to pass stage0)
#[cfg(stage0)] pub use self::Plus as PLUS;
#[cfg(stage0)] pub use self::Minus as MINUS;
#[cfg(stage0)] pub use self::Star as STAR;
#[cfg(stage0)] pub use self::Slash as SLASH;
#[cfg(stage0)] pub use self::Percent as PERCENT;
#[cfg(stage0)] pub use self::Caret as CARET;
#[cfg(stage0)] pub use self::And as AND;
#[cfg(stage0)] pub use self::Or as OR;
#[cfg(stage0)] pub use self::Shl as SHL;
#[cfg(stage0)] pub use self::Shr as SHR;
#[cfg(stage0)] pub use self::Eq as EQ;
#[cfg(stage0)] pub use self::Lt as LT;
#[cfg(stage0)] pub use self::Le as LE;
#[cfg(stage0)] pub use self::EqEq as EQEQ;
#[cfg(stage0)] pub use self::Ne as NE;
#[cfg(stage0)] pub use self::Ge as GE;
#[cfg(stage0)] pub use self::Gt as GT;
#[cfg(stage0)] pub use self::AndAnd as ANDAND;
#[cfg(stage0)] pub use self::OrOr as OROR;
#[cfg(stage0)] pub use self::Not as NOT;
#[cfg(stage0)] pub use self::Tilde as TILDE;
#[cfg(stage0)] pub use self::BinOp as BINOP;
#[cfg(stage0)] pub use self::BinOpEq as BINOPEQ;
#[cfg(stage0)] pub use self::At as AT;
#[cfg(stage0)] pub use self::Dot as DOT;
#[cfg(stage0)] pub use self::DotDot as DOTDOT;
#[cfg(stage0)] pub use self::DotDotDot as DOTDOTDOT;
#[cfg(stage0)] pub use self::Comma as COMMA;
#[cfg(stage0)] pub use self::Semi as SEMI;
#[cfg(stage0)] pub use self::Colon as COLON;
#[cfg(stage0)] pub use self::ModSep as MOD_SEP;
#[cfg(stage0)] pub use self::RArrow as RARROW;
#[cfg(stage0)] pub use self::LArrow as LARROW;
#[cfg(stage0)] pub use self::FatArrow as FAT_ARROW;
#[cfg(stage0)] pub use self::LParen as LPAREN;
#[cfg(stage0)] pub use self::RParen as RPAREN;
#[cfg(stage0)] pub use self::LBracket as LBRACKET;
#[cfg(stage0)] pub use self::RBracket as RBRACKET;
#[cfg(stage0)] pub use self::LBrace as LBRACE;
#[cfg(stage0)] pub use self::RBrace as RBRACE;
#[cfg(stage0)] pub use self::Pound as POUND;
#[cfg(stage0)] pub use self::Dollar as DOLLAR;
#[cfg(stage0)] pub use self::Question as QUESTION;
#[cfg(stage0)] pub use self::LitByte as LIT_BYTE;
#[cfg(stage0)] pub use self::LitChar as LIT_CHAR;
#[cfg(stage0)] pub use self::LitInteger as LIT_INTEGER;
#[cfg(stage0)] pub use self::LitFloat as LIT_FLOAT;
#[cfg(stage0)] pub use self::LitStr as LIT_STR;
#[cfg(stage0)] pub use self::LitStrRaw as LIT_STR_RAW;
#[cfg(stage0)] pub use self::LitBinary as LIT_BINARY;
#[cfg(stage0)] pub use self::LitBinaryRaw as LIT_BINARY_RAW;
#[cfg(stage0)] pub use self::Ident as IDENT;
#[cfg(stage0)] pub use self::Underscore as UNDERSCORE;
#[cfg(stage0)] pub use self::Lifetime as LIFETIME;
#[cfg(stage0)] pub use self::Interpolated as INTERPOLATED;
#[cfg(stage0)] pub use self::DocComment as DOC_COMMENT;
#[cfg(stage0)] pub use self::Whitespace as WS;
#[cfg(stage0)] pub use self::Comment as COMMENT;
#[cfg(stage0)] pub use self::Shebang as SHEBANG;
#[cfg(stage0)] pub use self::Eof as EOF;
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum BinOp {
PLUS,
MINUS,
STAR,
SLASH,
PERCENT,
CARET,
AND,
OR,
SHL,
SHR,
pub enum BinOpToken {
Plus,
Minus,
Star,
Slash,
Percent,
Caret,
And,
Or,
Shl,
Shr,
}
#[cfg(stage0)]
#[allow(non_uppercase_statics)]
pub const ModName: bool = true;
#[cfg(stage0)]
#[allow(non_uppercase_statics)]
pub const Plain: bool = false;
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
#[cfg(not(stage0))]
pub enum IdentStyle {
/// `::` follows the identifier with no whitespace in-between.
ModName,
Plain,
}
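Code that matches on identifier tokens now destructures this `IdentStyle` field instead of a boolean. A minimal sketch of the updated pattern, assuming a hypothetical `tok: Token` in scope and hypothetical `handle_*` functions:
match tok {
    // formerly token::IDENT(id, false)
    token::Ident(id, token::Plain) => handle_plain(id),
    // formerly token::IDENT(id, true): `::` follows with no whitespace
    token::Ident(id, token::ModName) => handle_mod_name(id),
    _ => {}
}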
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)]
pub enum Token {
/* Expression-operator symbols. */
EQ,
LT,
LE,
EQEQ,
NE,
GE,
GT,
ANDAND,
OROR,
NOT,
TILDE,
BINOP(BinOp),
BINOPEQ(BinOp),
Eq,
Lt,
Le,
EqEq,
Ne,
Ge,
Gt,
AndAnd,
OrOr,
Not,
Tilde,
BinOp(BinOpToken),
BinOpEq(BinOpToken),
/* Structural symbols */
AT,
DOT,
DOTDOT,
DOTDOTDOT,
COMMA,
SEMI,
COLON,
MOD_SEP,
RARROW,
LARROW,
FAT_ARROW,
LPAREN,
RPAREN,
LBRACKET,
RBRACKET,
LBRACE,
RBRACE,
POUND,
DOLLAR,
QUESTION,
At,
Dot,
DotDot,
DotDotDot,
Comma,
Semi,
Colon,
ModSep,
RArrow,
LArrow,
FatArrow,
LParen,
RParen,
LBracket,
RBracket,
LBrace,
RBrace,
Pound,
Dollar,
Question,
/* Literals */
LIT_BYTE(Name),
LIT_CHAR(Name),
LIT_INTEGER(Name),
LIT_FLOAT(Name),
LIT_STR(Name),
LIT_STR_RAW(Name, uint), /* raw str delimited by n hash symbols */
LIT_BINARY(Name),
LIT_BINARY_RAW(Name, uint), /* raw binary str delimited by n hash symbols */
LitByte(ast::Name),
LitChar(ast::Name),
LitInteger(ast::Name),
LitFloat(ast::Name),
LitStr(ast::Name),
LitStrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */
LitBinary(ast::Name),
LitBinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */
/* Name components */
/// An identifier contains an "is_mod_name" boolean,
/// indicating whether :: follows this token with no
/// whitespace in between.
IDENT(Ident, bool),
UNDERSCORE,
LIFETIME(Ident),
#[cfg(stage0)]
Ident(ast::Ident, bool),
#[cfg(not(stage0))]
Ident(ast::Ident, IdentStyle),
Underscore,
Lifetime(ast::Ident),
/* For interpolation */
INTERPOLATED(Nonterminal),
DOC_COMMENT(Name),
Interpolated(Nonterminal),
DocComment(ast::Name),
// Junk. These carry no data because we don't really care about the data
// they *would* carry, and don't really want to allocate a new ident for
// them. Instead, users could extract that from the associated span.
/// Whitespace
WS,
Whitespace,
/// Comment
COMMENT,
SHEBANG(Name),
Comment,
Shebang(ast::Name),
EOF,
Eof,
}
impl Token {
/// Returns `true` if the token can appear at the start of an expression.
pub fn can_begin_expr(&self) -> bool {
match *self {
LParen => true,
LBrace => true,
LBracket => true,
Ident(_, _) => true,
Underscore => true,
Tilde => true,
LitByte(_) => true,
LitChar(_) => true,
LitInteger(_) => true,
LitFloat(_) => true,
LitStr(_) => true,
LitStrRaw(_, _) => true,
LitBinary(_) => true,
LitBinaryRaw(_, _) => true,
Pound => true,
At => true,
Not => true,
BinOp(Minus) => true,
BinOp(Star) => true,
BinOp(And) => true,
BinOp(Or) => true, // in lambda syntax
OrOr => true, // in lambda syntax
ModSep => true,
Interpolated(NtExpr(..)) => true,
Interpolated(NtIdent(..)) => true,
Interpolated(NtBlock(..)) => true,
Interpolated(NtPath(..)) => true,
_ => false,
}
}
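A brief usage sketch of the method form (replacing the free function `can_begin_expr` removed further down); the assertions follow directly from the match arms above:
assert!(LParen.can_begin_expr());
assert!(BinOp(Minus).can_begin_expr()); // unary negation
assert!(!Comma.can_begin_expr());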
/// Returns the matching close delimiter if this is an open delimiter,
/// otherwise `None`.
pub fn get_close_delimiter(&self) -> Option<Token> {
match *self {
LParen => Some(RParen),
LBrace => Some(RBrace),
LBracket => Some(RBracket),
_ => None,
}
}
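Usage sketch for the renamed delimiter helper (formerly the free function `close_delimiter_for`):
assert_eq!(LBracket.get_close_delimiter(), Some(RBracket));
assert_eq!(Semi.get_close_delimiter(), None);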
/// Returns `true` if the token is any literal.
pub fn is_lit(&self) -> bool {
match *self {
LitByte(_) => true,
LitChar(_) => true,
LitInteger(_) => true,
LitFloat(_) => true,
LitStr(_) => true,
LitStrRaw(_, _) => true,
LitBinary(_) => true,
LitBinaryRaw(_, _) => true,
_ => false,
}
}
/// Returns `true` if the token is an identifier.
pub fn is_ident(&self) -> bool {
match *self {
Ident(_, _) => true,
_ => false,
}
}
/// Returns `true` if the token is an interpolated path.
pub fn is_path(&self) -> bool {
match *self {
Interpolated(NtPath(..)) => true,
_ => false,
}
}
/// Returns `true` if the token is a plain identifier, i.e. one that is
/// not immediately followed by a `::` token.
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
pub fn is_plain_ident(&self) -> bool {
match *self {
Ident(_, Plain) => true,
_ => false,
}
}
/// Returns `true` if the token is a lifetime.
pub fn is_lifetime(&self) -> bool {
match *self {
Lifetime(..) => true,
_ => false,
}
}
/// Returns `true` if the token is either the `mut` or `const` keyword.
pub fn is_mutability(&self) -> bool {
self.is_keyword(keywords::Mut) ||
self.is_keyword(keywords::Const)
}
/// Maps a token to its corresponding binary operator.
pub fn to_binop(&self) -> Option<ast::BinOp> {
match *self {
BinOp(Star) => Some(ast::BiMul),
BinOp(Slash) => Some(ast::BiDiv),
BinOp(Percent) => Some(ast::BiRem),
BinOp(Plus) => Some(ast::BiAdd),
BinOp(Minus) => Some(ast::BiSub),
BinOp(Shl) => Some(ast::BiShl),
BinOp(Shr) => Some(ast::BiShr),
BinOp(And) => Some(ast::BiBitAnd),
BinOp(Caret) => Some(ast::BiBitXor),
BinOp(Or) => Some(ast::BiBitOr),
Lt => Some(ast::BiLt),
Le => Some(ast::BiLe),
Ge => Some(ast::BiGe),
Gt => Some(ast::BiGt),
EqEq => Some(ast::BiEq),
Ne => Some(ast::BiNe),
AndAnd => Some(ast::BiAnd),
OrOr => Some(ast::BiOr),
_ => None,
}
}
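Usage sketch for the operator mapping (formerly the free function `token_to_binop`, removed further down); this assumes `ast::BinOp` supports equality comparison:
assert_eq!(BinOp(Star).to_binop(), Some(ast::BiMul));
assert_eq!(EqEq.to_binop(), Some(ast::BiEq));
assert_eq!(Dollar.to_binop(), None);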
/// Returns `true` if the token is a given keyword, `kw`.
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
pub fn is_keyword(&self, kw: keywords::Keyword) -> bool {
match *self {
Ident(sid, Plain) => kw.to_name() == sid.name,
_ => false,
}
}
/// Returns `true` if the token is either a special identifier, or a strict
/// or reserved keyword.
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
pub fn is_any_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false
}
}
/// Returns `true` if the token may not appear as an identifier.
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
pub fn is_strict_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
},
Ident(sid, ModName) => {
let n = sid.name;
n != SELF_KEYWORD_NAME
&& n != SUPER_KEYWORD_NAME
&& STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
}
_ => false,
}
}
/// Returns `true` if the token is a keyword that has been reserved for
/// possible future use.
#[allow(non_uppercase_statics)] // NOTE(stage0): remove this attribute after the next snapshot
pub fn is_reserved_keyword(&self) -> bool {
match *self {
Ident(sid, Plain) => {
let n = sid.name;
RESERVED_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false,
}
}
/// Hygienic identifier equality comparison.
///
/// See `syntax::ext::mtwt`.
pub fn mtwt_eq(&self, other : &Token) -> bool {
match (self, other) {
(&Ident(id1,_), &Ident(id2,_)) | (&Lifetime(id1), &Lifetime(id2)) =>
mtwt::resolve(id1) == mtwt::resolve(id2),
_ => *self == *other
}
}
}
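Hygienic comparison via `mtwt_eq` disregards the `IdentStyle` field and compares the resolved names, as the updated `mtwt_token_eq_test` at the bottom of this file exercises. A minimal sketch:
let a = str_to_ident("bac");
assert!(Ident(a, ModName).mtwt_eq(&Ident(a, Plain)));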
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)]
@ -121,8 +402,10 @@ pub enum Nonterminal {
NtPat( P<ast::Pat>),
NtExpr( P<ast::Expr>),
NtTy( P<ast::Ty>),
/// See IDENT, above, for meaning of bool in NtIdent:
NtIdent(Box<Ident>, bool),
#[cfg(stage0)]
NtIdent(Box<ast::Ident>, bool),
#[cfg(not(stage0))]
NtIdent(Box<ast::Ident>, IdentStyle),
/// Stuff inside brackets for attributes
NtMeta( P<ast::MetaItem>),
NtPath(Box<ast::Path>),
@ -148,204 +431,6 @@ impl fmt::Show for Nonterminal {
}
}
pub fn binop_to_string(o: BinOp) -> &'static str {
match o {
PLUS => "+",
MINUS => "-",
STAR => "*",
SLASH => "/",
PERCENT => "%",
CARET => "^",
AND => "&",
OR => "|",
SHL => "<<",
SHR => ">>"
}
}
pub fn to_string(t: &Token) -> String {
match *t {
EQ => "=".into_string(),
LT => "<".into_string(),
LE => "<=".into_string(),
EQEQ => "==".into_string(),
NE => "!=".into_string(),
GE => ">=".into_string(),
GT => ">".into_string(),
NOT => "!".into_string(),
TILDE => "~".into_string(),
OROR => "||".into_string(),
ANDAND => "&&".into_string(),
BINOP(op) => binop_to_string(op).into_string(),
BINOPEQ(op) => {
let mut s = binop_to_string(op).into_string();
s.push_str("=");
s
}
/* Structural symbols */
AT => "@".into_string(),
DOT => ".".into_string(),
DOTDOT => "..".into_string(),
DOTDOTDOT => "...".into_string(),
COMMA => ",".into_string(),
SEMI => ";".into_string(),
COLON => ":".into_string(),
MOD_SEP => "::".into_string(),
RARROW => "->".into_string(),
LARROW => "<-".into_string(),
FAT_ARROW => "=>".into_string(),
LPAREN => "(".into_string(),
RPAREN => ")".into_string(),
LBRACKET => "[".into_string(),
RBRACKET => "]".into_string(),
LBRACE => "{".into_string(),
RBRACE => "}".into_string(),
POUND => "#".into_string(),
DOLLAR => "$".into_string(),
QUESTION => "?".into_string(),
/* Literals */
LIT_BYTE(b) => {
format!("b'{}'", b.as_str())
}
LIT_CHAR(c) => {
format!("'{}'", c.as_str())
}
LIT_INTEGER(c) | LIT_FLOAT(c) => {
c.as_str().into_string()
}
LIT_STR(s) => {
format!("\"{}\"", s.as_str())
}
LIT_STR_RAW(s, n) => {
format!("r{delim}\"{string}\"{delim}",
delim="#".repeat(n), string=s.as_str())
}
LIT_BINARY(v) => {
format!("b\"{}\"", v.as_str())
}
LIT_BINARY_RAW(s, n) => {
format!("br{delim}\"{string}\"{delim}",
delim="#".repeat(n), string=s.as_str())
}
/* Name components */
IDENT(s, _) => get_ident(s).get().into_string(),
LIFETIME(s) => {
format!("{}", get_ident(s))
}
UNDERSCORE => "_".into_string(),
/* Other */
DOC_COMMENT(s) => s.as_str().into_string(),
EOF => "<eof>".into_string(),
WS => " ".into_string(),
COMMENT => "/* */".into_string(),
SHEBANG(s) => format!("/* shebang: {}*/", s.as_str()),
INTERPOLATED(ref nt) => {
match nt {
&NtExpr(ref e) => ::print::pprust::expr_to_string(&**e),
&NtMeta(ref e) => ::print::pprust::meta_item_to_string(&**e),
&NtTy(ref e) => ::print::pprust::ty_to_string(&**e),
&NtPath(ref e) => ::print::pprust::path_to_string(&**e),
_ => {
let mut s = "an interpolated ".into_string();
match *nt {
NtItem(..) => s.push_str("item"),
NtBlock(..) => s.push_str("block"),
NtStmt(..) => s.push_str("statement"),
NtPat(..) => s.push_str("pattern"),
NtMeta(..) => fail!("should have been handled"),
NtExpr(..) => fail!("should have been handled"),
NtTy(..) => fail!("should have been handled"),
NtIdent(..) => s.push_str("identifier"),
NtPath(..) => fail!("should have been handled"),
NtTT(..) => s.push_str("tt"),
NtMatchers(..) => s.push_str("matcher sequence")
};
s
}
}
}
}
}
pub fn can_begin_expr(t: &Token) -> bool {
match *t {
LPAREN => true,
LBRACE => true,
LBRACKET => true,
IDENT(_, _) => true,
UNDERSCORE => true,
TILDE => true,
LIT_BYTE(_) => true,
LIT_CHAR(_) => true,
LIT_INTEGER(_) => true,
LIT_FLOAT(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
LIT_BINARY(_) => true,
LIT_BINARY_RAW(_, _) => true,
POUND => true,
AT => true,
NOT => true,
BINOP(MINUS) => true,
BINOP(STAR) => true,
BINOP(AND) => true,
BINOP(OR) => true, // in lambda syntax
OROR => true, // in lambda syntax
MOD_SEP => true,
INTERPOLATED(NtExpr(..))
| INTERPOLATED(NtIdent(..))
| INTERPOLATED(NtBlock(..))
| INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
/// Returns the matching close delimiter if this is an open delimiter,
/// otherwise `None`.
pub fn close_delimiter_for(t: &Token) -> Option<Token> {
match *t {
LPAREN => Some(RPAREN),
LBRACE => Some(RBRACE),
LBRACKET => Some(RBRACKET),
_ => None
}
}
pub fn is_lit(t: &Token) -> bool {
match *t {
LIT_BYTE(_) => true,
LIT_CHAR(_) => true,
LIT_INTEGER(_) => true,
LIT_FLOAT(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
LIT_BINARY(_) => true,
LIT_BINARY_RAW(_, _) => true,
_ => false
}
}
pub fn is_ident(t: &Token) -> bool {
match *t { IDENT(_, _) => true, _ => false }
}
pub fn is_ident_or_path(t: &Token) -> bool {
match *t {
IDENT(_, _) | INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
pub fn is_plain_ident(t: &Token) -> bool {
match *t { IDENT(_, false) => true, _ => false }
}
// Get the first "argument"
macro_rules! first {
( $first:expr, $( $remainder:expr, )* ) => ( $first )
@ -376,22 +461,28 @@ macro_rules! declare_special_idents_and_keywords {(
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
) => {
static STRICT_KEYWORD_START: Name = first!($( Name($sk_name), )*);
static STRICT_KEYWORD_FINAL: Name = last!($( Name($sk_name), )*);
static RESERVED_KEYWORD_START: Name = first!($( Name($rk_name), )*);
static RESERVED_KEYWORD_FINAL: Name = last!($( Name($rk_name), )*);
static STRICT_KEYWORD_START: ast::Name = first!($( ast::Name($sk_name), )*);
static STRICT_KEYWORD_FINAL: ast::Name = last!($( ast::Name($sk_name), )*);
static RESERVED_KEYWORD_START: ast::Name = first!($( ast::Name($rk_name), )*);
static RESERVED_KEYWORD_FINAL: ast::Name = last!($( ast::Name($rk_name), )*);
pub mod special_idents {
use ast::{Ident, Name};
use ast;
$(
#[allow(non_uppercase_statics)]
pub const $si_static: Ident = Ident { name: Name($si_name), ctxt: 0 };
pub const $si_static: ast::Ident = ast::Ident {
name: ast::Name($si_name),
ctxt: 0,
};
)*
}
pub mod special_names {
use ast::Name;
$( #[allow(non_uppercase_statics)] pub const $si_static: Name = Name($si_name); )*
use ast;
$(
#[allow(non_uppercase_statics)]
pub const $si_static: ast::Name = ast::Name($si_name);
)*
}
/**
@ -402,7 +493,7 @@ macro_rules! declare_special_idents_and_keywords {(
* the language and may not appear as identifiers.
*/
pub mod keywords {
use ast::Name;
use ast;
pub enum Keyword {
$( $sk_variant, )*
@ -410,10 +501,10 @@ macro_rules! declare_special_idents_and_keywords {(
}
impl Keyword {
pub fn to_name(&self) -> Name {
pub fn to_name(&self) -> ast::Name {
match *self {
$( $sk_variant => Name($sk_name), )*
$( $rk_variant => Name($rk_name), )*
$( $sk_variant => ast::Name($sk_name), )*
$( $rk_variant => ast::Name($rk_name), )*
}
}
}
@ -432,9 +523,9 @@ macro_rules! declare_special_idents_and_keywords {(
}}
// If the special idents get renumbered, remember to modify these two as appropriate
pub const SELF_KEYWORD_NAME: Name = Name(SELF_KEYWORD_NAME_NUM);
const STATIC_KEYWORD_NAME: Name = Name(STATIC_KEYWORD_NAME_NUM);
const SUPER_KEYWORD_NAME: Name = Name(SUPER_KEYWORD_NAME_NUM);
pub const SELF_KEYWORD_NAME: ast::Name = ast::Name(SELF_KEYWORD_NAME_NUM);
const STATIC_KEYWORD_NAME: ast::Name = ast::Name(STATIC_KEYWORD_NAME_NUM);
const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM);
pub const SELF_KEYWORD_NAME_NUM: u32 = 1;
const STATIC_KEYWORD_NAME_NUM: u32 = 2;
@ -526,34 +617,6 @@ declare_special_idents_and_keywords! {
}
}
/**
* Maps a token to a record specifying the corresponding binary
* operator
*/
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
match *tok {
BINOP(STAR) => Some(ast::BiMul),
BINOP(SLASH) => Some(ast::BiDiv),
BINOP(PERCENT) => Some(ast::BiRem),
BINOP(PLUS) => Some(ast::BiAdd),
BINOP(MINUS) => Some(ast::BiSub),
BINOP(SHL) => Some(ast::BiShl),
BINOP(SHR) => Some(ast::BiShr),
BINOP(AND) => Some(ast::BiBitAnd),
BINOP(CARET) => Some(ast::BiBitXor),
BINOP(OR) => Some(ast::BiBitOr),
LT => Some(ast::BiLt),
LE => Some(ast::BiLe),
GE => Some(ast::BiGe),
GT => Some(ast::BiGt),
EQEQ => Some(ast::BiEq),
NE => Some(ast::BiNe),
ANDAND => Some(ast::BiAnd),
OROR => Some(ast::BiOr),
_ => None
}
}
// looks like we can get rid of this completely...
pub type IdentInterner = StrInterner;
@ -646,7 +709,7 @@ impl<S:Encoder<E>, E> Encodable<S, E> for InternedString {
/// Returns the string contents of a name, using the task-local interner.
#[inline]
pub fn get_name(name: Name) -> InternedString {
pub fn get_name(name: ast::Name) -> InternedString {
let interner = get_ident_interner();
InternedString::new_from_rc_str(interner.get(name))
}
@ -654,7 +717,7 @@ pub fn get_name(name: Name) -> InternedString {
/// Returns the string contents of an identifier, using the task-local
/// interner.
#[inline]
pub fn get_ident(ident: Ident) -> InternedString {
pub fn get_ident(ident: ast::Ident) -> InternedString {
get_name(ident.name)
}
@ -667,32 +730,32 @@ pub fn intern_and_get_ident(s: &str) -> InternedString {
/// Maps a string to its interned representation.
#[inline]
pub fn intern(s: &str) -> Name {
pub fn intern(s: &str) -> ast::Name {
get_ident_interner().intern(s)
}
/// Gensyms a fresh name, using the current interner.
#[inline]
pub fn gensym(s: &str) -> Name {
pub fn gensym(s: &str) -> ast::Name {
get_ident_interner().gensym(s)
}
/// Maps a string to an identifier with an empty syntax context.
#[inline]
pub fn str_to_ident(s: &str) -> Ident {
Ident::new(intern(s))
pub fn str_to_ident(s: &str) -> ast::Ident {
ast::Ident::new(intern(s))
}
/// Maps a string to a gensym'ed identifier.
#[inline]
pub fn gensym_ident(s: &str) -> Ident {
Ident::new(gensym(s))
pub fn gensym_ident(s: &str) -> ast::Ident {
ast::Ident::new(gensym(s))
}
// create a fresh name that maps to the same string as the old one.
// note that this guarantees that str_ptr_eq(ident_to_string(src),interner_get(fresh_name(src)));
// that is, that the new name and the old one are connected to ptr_eq strings.
pub fn fresh_name(src: &Ident) -> Name {
pub fn fresh_name(src: &ast::Ident) -> ast::Name {
let interner = get_ident_interner();
interner.gensym_copy(src.name)
// following: debug version. Could work in final except that it's incompatible with
@ -703,78 +766,10 @@ pub fn fresh_name(src: &Ident) -> Name {
}
// create a fresh mark.
pub fn fresh_mark() -> Mrk {
pub fn fresh_mark() -> ast::Mrk {
gensym("mark").uint() as u32
}
// See the macro above about the types of keywords
pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => { kw.to_name() == sid.name }
_ => { false }
}
}
pub fn is_any_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false
}
}
pub fn is_strict_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => {
let n = sid.name;
n == SELF_KEYWORD_NAME
|| n == STATIC_KEYWORD_NAME
|| n == SUPER_KEYWORD_NAME
|| STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
},
token::IDENT(sid, true) => {
let n = sid.name;
n != SELF_KEYWORD_NAME
&& n != SUPER_KEYWORD_NAME
&& STRICT_KEYWORD_START <= n
&& n <= STRICT_KEYWORD_FINAL
}
_ => false,
}
}
pub fn is_reserved_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => {
let n = sid.name;
RESERVED_KEYWORD_START <= n
&& n <= RESERVED_KEYWORD_FINAL
},
_ => false,
}
}
pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&IDENT(id1,_),&IDENT(id2,_)) | (&LIFETIME(id1),&LIFETIME(id2)) =>
mtwt::resolve(id1) == mtwt::resolve(id2),
_ => *t1 == *t2
}
}
#[cfg(test)]
mod test {
use super::*;
@ -786,9 +781,9 @@ mod test {
}
#[test] fn mtwt_token_eq_test() {
assert!(mtwt_token_eq(&GT,&GT));
assert!(Gt.mtwt_eq(&Gt));
let a = str_to_ident("bac");
let a1 = mark_ident(a,92);
assert!(mtwt_token_eq(&IDENT(a,true),&IDENT(a1,false)));
assert!(Ident(a, ModName).mtwt_eq(&Ident(a1, Plain)));
}
}


@ -21,6 +21,7 @@ use attr::{AttrMetaMethods, AttributeMethods};
use codemap::{CodeMap, BytePos};
use codemap;
use diagnostic;
use parse::token::{BinOpToken, Token};
use parse::token;
use parse::lexer::comments;
use parse;
@ -181,6 +182,101 @@ pub fn to_string(f: |&mut State| -> IoResult<()>) -> String {
}
}
pub fn binop_to_string(op: BinOpToken) -> &'static str {
match op {
token::Plus => "+",
token::Minus => "-",
token::Star => "*",
token::Slash => "/",
token::Percent => "%",
token::Caret => "^",
token::And => "&",
token::Or => "|",
token::Shl => "<<",
token::Shr => ">>",
}
}
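A quick sketch of the relocated helper (this function previously lived in `syntax::parse::token`):
assert_eq!(binop_to_string(token::Shr), ">>");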
pub fn token_to_string(tok: &Token) -> String {
match *tok {
token::Eq => "=".into_string(),
token::Lt => "<".into_string(),
token::Le => "<=".into_string(),
token::EqEq => "==".into_string(),
token::Ne => "!=".into_string(),
token::Ge => ">=".into_string(),
token::Gt => ">".into_string(),
token::Not => "!".into_string(),
token::Tilde => "~".into_string(),
token::OrOr => "||".into_string(),
token::AndAnd => "&&".into_string(),
token::BinOp(op) => binop_to_string(op).into_string(),
token::BinOpEq(op) => format!("{}=", binop_to_string(op)),
/* Structural symbols */
token::At => "@".into_string(),
token::Dot => ".".into_string(),
token::DotDot => "..".into_string(),
token::DotDotDot => "...".into_string(),
token::Comma => ",".into_string(),
token::Semi => ";".into_string(),
token::Colon => ":".into_string(),
token::ModSep => "::".into_string(),
token::RArrow => "->".into_string(),
token::LArrow => "<-".into_string(),
token::FatArrow => "=>".into_string(),
token::LParen => "(".into_string(),
token::RParen => ")".into_string(),
token::LBracket => "[".into_string(),
token::RBracket => "]".into_string(),
token::LBrace => "{".into_string(),
token::RBrace => "}".into_string(),
token::Pound => "#".into_string(),
token::Dollar => "$".into_string(),
token::Question => "?".into_string(),
/* Literals */
token::LitByte(b) => format!("b'{}'", b.as_str()),
token::LitChar(c) => format!("'{}'", c.as_str()),
token::LitFloat(c) => c.as_str().into_string(),
token::LitInteger(c) => c.as_str().into_string(),
token::LitStr(s) => format!("\"{}\"", s.as_str()),
token::LitStrRaw(s, n) => format!("r{delim}\"{string}\"{delim}",
delim="#".repeat(n),
string=s.as_str()),
token::LitBinary(v) => format!("b\"{}\"", v.as_str()),
token::LitBinaryRaw(s, n) => format!("br{delim}\"{string}\"{delim}",
delim="#".repeat(n),
string=s.as_str()),
/* Name components */
token::Ident(s, _) => token::get_ident(s).get().into_string(),
token::Lifetime(s) => format!("{}", token::get_ident(s)),
token::Underscore => "_".into_string(),
/* Other */
token::DocComment(s) => s.as_str().into_string(),
token::Eof => "<eof>".into_string(),
token::Whitespace => " ".into_string(),
token::Comment => "/* */".into_string(),
token::Shebang(s) => format!("/* shebang: {}*/", s.as_str()),
token::Interpolated(ref nt) => match *nt {
token::NtExpr(ref e) => expr_to_string(&**e),
token::NtMeta(ref e) => meta_item_to_string(&**e),
token::NtTy(ref e) => ty_to_string(&**e),
token::NtPath(ref e) => path_to_string(&**e),
token::NtItem(..) => "an interpolated item".into_string(),
token::NtBlock(..) => "an interpolated block".into_string(),
token::NtStmt(..) => "an interpolated statement".into_string(),
token::NtPat(..) => "an interpolated pattern".into_string(),
token::NtIdent(..) => "an interpolated identifier".into_string(),
token::NtTT(..) => "an interpolated tt".into_string(),
token::NtMatchers(..) => "an interpolated matcher sequence".into_string(),
}
}
}
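And a usage sketch for `token_to_string`, which replaces the old `token::to_string`:
assert_eq!(token_to_string(&token::FatArrow), "=>".to_string());
assert_eq!(token_to_string(&token::BinOpEq(token::Plus)), "+=".to_string());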
// FIXME (Issue #16472): the thing_to_string_impls macro should go away
// after we revise the syntax::ext::quote::ToToken impls to go directly
// to token-trees instead of thing -> string -> token-trees.
@ -1026,16 +1122,16 @@ impl<'a> State<'a> {
match *tt {
ast::TtDelimited(_, ref delimed) => {
let (ref open, ref tts, ref close) = **delimed;
try!(word(&mut self.s, parse::token::to_string(&open.token).as_slice()));
try!(word(&mut self.s, token_to_string(&open.token).as_slice()));
try!(space(&mut self.s));
try!(self.print_tts(tts.as_slice()));
try!(space(&mut self.s));
word(&mut self.s, parse::token::to_string(&close.token).as_slice())
word(&mut self.s, token_to_string(&close.token).as_slice())
},
ast::TtToken(_, ref tk) => {
try!(word(&mut self.s, parse::token::to_string(tk).as_slice()));
try!(word(&mut self.s, token_to_string(tk).as_slice()));
match *tk {
parse::token::DOC_COMMENT(..) => {
parse::token::DocComment(..) => {
hardbreak(&mut self.s)
}
_ => Ok(())
@ -1049,10 +1145,9 @@ impl<'a> State<'a> {
try!(word(&mut self.s, ")"));
match *separator {
Some(ref tk) => {
try!(word(&mut self.s,
parse::token::to_string(tk).as_slice()));
try!(word(&mut self.s, token_to_string(tk).as_slice()));
}
None => ()
None => {},
}
match kleene_op {
ast::ZeroOrMore => word(&mut self.s, "*"),


@ -17,7 +17,7 @@ extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{IDENT, get_ident};
use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::build::AstBuilder; // trait for expr_uint
@ -39,7 +39,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
("I", 1)];
let text = match args {
[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);


@ -8,4 +8,4 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type t = { f: () }; //~ ERROR expected type, found token LBRACE
type t = { f: () }; //~ ERROR expected type, found token LBrace