rust/src/libsyntax/ext/tt/macro_parser.rs

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Earley-like parser for macros.

use ast;
use ast::{matcher, match_tok, match_seq, match_nonterminal, ident};
use codemap::{BytePos, mk_sp};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::parser::Parser;
use parse::token::{Token, EOF, to_str, nonterminal, get_ident_interner, ident_to_str};
use parse::token;

use std::hashmap::HashMap;
use std::uint;
use std::vec;

/* This is an Earley-like parser, without support for in-grammar nonterminals,
handling named nonterminals only by calling out to the main Rust parser (to
which it commits fully when it hits one in a grammar). This means that there
are no completer or predictor rules, and therefore no need to store one column
per token: instead, there's a set of current Earley items and a set of next
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
pathological cases, is worse than traditional Earley parsing, but it's an
easier fit for Macro-by-Example-style rules, and I think the overhead is
lower. (In order to prevent the pathological case, we'd need to lazily
construct the resulting `named_match`es at the very end. It'd be a pain,
and require more memory to keep around old items, but it would also save
overhead.) */

/* Quick intro to how the parser works:

A 'position' is a spot in the middle of a matcher at which the parser is
currently sitting, represented here as a dot. For example `· a $( a )* a b`
is a position, as is `a $( · a )* a b`.

The parser walks through the input a token at a time, maintaining a list
of items consistent with the current position in the input string: `cur_eis`.

As it processes them, it fills up `eof_eis` with items that would be valid if
the macro invocation is now over, `bb_eis` with items that are waiting on
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
on a particular token. Most of the logic concerns moving the · through the
repetitions indicated by Kleene stars. It only advances or calls out to the
real Rust parser when no `cur_eis` items remain.

Example: Start parsing `a a a a b` against [· a $( a )* a b].

Remaining input: `a a a a b`
next_eis: [· a $( a )* a b]

- - - Advance over an `a`. - - -

Remaining input: `a a a b`
cur: [a · $( a )* a b]
Descend/Skip (first item).
next: [a $( · a )* a b]  [a $( a )* · a b].

- - - Advance over an `a`. - - -

Remaining input: `a a b`
cur: [a $( a · )* a b]  next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b]  [a $( · a )* a b]  [a $( a )* a · b]

- - - Advance over an `a`. - - - (this looks exactly like the last step)

Remaining input: `a b`
cur: [a $( a · )* a b]  next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b]  [a $( · a )* a b]  [a $( a )* a · b]

- - - Advance over an `a`. - - - (this looks exactly like the last step)

Remaining input: `b`
cur: [a $( a · )* a b]  next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b]  [a $( · a )* a b]

- - - Advance over a `b`. - - -

Remaining input: ``
eof: [a $( a )* a b ·]
*/
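
// For orientation, an illustrative sketch (not part of the original source):
// the matcher traced above is what a macro_rules! rule would write as
// `a $( a )* a b` on its left-hand side, e.g. roughly
//
//     macro_rules! m (
//         (a $( a )* a b) => ( () )
//     )
//
// Each literal token becomes a match_tok, each `$( ... )*` becomes a
// match_seq, and a fragment such as `$e:expr` would become a
// match_nonterminal (handled by calling back into the Rust parser).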

/* to avoid costly uniqueness checks, we require that `match_seq` always has a
nonempty body. */

#[deriving(Clone)]
pub enum matcher_pos_up { /* to break a circularity */
    matcher_pos_up(Option<~MatcherPos>)
}

pub fn is_some(mpu: &matcher_pos_up) -> bool {
    match *mpu {
        matcher_pos_up(None) => false,
        _ => true
    }
}

#[deriving(Clone)]
pub struct MatcherPos {
    elts: ~[ast::matcher], // maybe should be <'>? Need to understand regions.
    sep: Option<Token>,
    idx: uint,
    up: matcher_pos_up, // mutable for swapping only
    matches: ~[~[@named_match]],
    match_lo: uint, match_hi: uint,
    sp_lo: BytePos,
}

pub fn copy_up(mpu: &matcher_pos_up) -> ~MatcherPos {
    match *mpu {
        matcher_pos_up(Some(ref mp)) => (*mp).clone(),
        _ => fail!()
    }
}

pub fn count_names(ms: &[matcher]) -> uint {
    do ms.iter().fold(0) |ct, m| {
        ct + match m.node {
            match_tok(_) => 0u,
            match_seq(ref more_ms, _, _, _, _) => count_names((*more_ms)),
            match_nonterminal(_, _, _) => 1u
        }
    }
}
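
// For example (an illustrative note, not from the original source): for a
// matcher written as `$a:expr $( $b:ident )*`, count_names returns 2 -- one
// for the `$a` nonterminal and one for the `$b` nonterminal inside the
// sequence; literal tokens contribute nothing.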

pub fn initial_matcher_pos(ms: ~[matcher], sep: Option<Token>, lo: BytePos)
                           -> ~MatcherPos {
    let mut match_idx_hi = 0u;
    for ms.iter().advance |elt| {
        match elt.node {
            match_tok(_) => (),
            match_seq(_,_,_,_,hi) => {
                match_idx_hi = hi;       // it is monotonic...
            }
            match_nonterminal(_,_,pos) => {
                match_idx_hi = pos + 1u; // ...so latest is highest
            }
        }
    }
    let matches = vec::from_fn(count_names(ms), |_i| ~[]);
    ~MatcherPos {
        elts: ms,
        sep: sep,
        idx: 0u,
        up: matcher_pos_up(None),
        matches: matches,
        match_lo: 0u,
        match_hi: match_idx_hi,
        sp_lo: lo
    }
}

// named_match is a pattern-match result for a single ast::match_nonterminal:
// so it is associated with a single ident in a parse, and all
// matched_nonterminals in the named_match have the same nonterminal type
// (expr, item, etc). All the leaves in a single named_match correspond to a
// single match_nonterminal in the ast::matcher that produced it.
//
// It should probably be renamed; it has a more or less exact correspondence
// to ast::matcher nodes, and the in-memory structure of a particular
// named_match represents the match that occurred when a particular subset of
// an ast::matcher -- those ast::matcher nodes leading to a single
// match_nonterminal -- was applied to a particular token tree.
//
// The width of each matched_seq in the named_match, and the identity of the
// matched_nonterminals, will depend on the token tree it was applied to: each
// matched_seq corresponds to a single match_seq in the originating
// ast::matcher. The depth of the named_match structure will therefore depend
// only on the nesting depth of match_seqs in the originating ast::matcher it
// was derived from.

pub enum named_match {
    matched_seq(~[@named_match], codemap::span),
    matched_nonterminal(nonterminal)
}
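
// An illustrative example (a sketch added for exposition, not part of the
// original source): for a matcher written as `$( $x:ident ),* $e:expr`
// applied to the tokens `a , b 7`, the two bound names end up with
// named_match values shaped roughly like
//
//     $x -> matched_seq(~[@matched_nonterminal(<ident a>),
//                         @matched_nonterminal(<ident b>)], <span>)
//     $e -> matched_nonterminal(<expr 7>)
//
// i.e. each match_seq in the matcher contributes one level of matched_seq
// nesting, and every leaf is a matched_nonterminal. The payloads in angle
// brackets are schematic.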

pub type earley_item = ~MatcherPos;

pub fn nameize(p_s: @mut ParseSess, ms: &[matcher], res: &[@named_match])
               -> HashMap<ident, @named_match> {
    fn n_rec(p_s: @mut ParseSess, m: &matcher, res: &[@named_match],
             ret_val: &mut HashMap<ident, @named_match>) {
        match *m {
            codemap::spanned {node: match_tok(_), _} => (),
            codemap::spanned {node: match_seq(ref more_ms, _, _, _, _), _} => {
                for more_ms.iter().advance |next_m| {
                    n_rec(p_s, next_m, res, ret_val)
                };
            }
            codemap::spanned {
                node: match_nonterminal(ref bind_name, _, idx), span: sp
            } => {
                if ret_val.contains_key(bind_name) {
                    p_s.span_diagnostic.span_fatal(sp, ~"Duplicated bind name: " +
                                                   ident_to_str(bind_name))
                }
                ret_val.insert(*bind_name, res[idx]);
            }
        }
    }
    let mut ret_val = HashMap::new();
    for ms.iter().advance |m| { n_rec(p_s, m, res, &mut ret_val) }
    ret_val
}

pub enum parse_result {
    success(HashMap<ident, @named_match>),
    failure(codemap::span, ~str),
    error(codemap::span, ~str)
}

pub fn parse_or_else(
    sess: @mut ParseSess,
    cfg: ast::crate_cfg,
    rdr: @reader,
    ms: ~[matcher]
) -> HashMap<ident, @named_match> {
    match parse(sess, cfg, rdr, ms) {
        success(m) => m,
        failure(sp, str) => sess.span_diagnostic.span_fatal(sp, str),
        error(sp, str) => sess.span_diagnostic.span_fatal(sp, str)
    }
}
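
// A rough usage sketch (added for exposition; the exact argument list of
// `new_tt_reader` here is an assumption, not taken from this file): the
// macro_rules expander wraps the invocation's token trees in a reader and
// hands it to parse_or_else together with the rule's matchers, e.g.
//
//     let arg_rdr = new_tt_reader(sess.span_diagnostic, None, arg.to_owned());
//     let bindings = parse_or_else(sess, cfg, arg_rdr as @reader, ms.clone());
//
// On success the returned map takes each `$name` ident to its named_match;
// on failure or ambiguity parse_or_else aborts via the span diagnostic.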

pub fn parse(
    sess: @mut ParseSess,
    cfg: ast::crate_cfg,
    rdr: @reader,
    ms: &[matcher]
) -> parse_result {
    let mut cur_eis = ~[];
    cur_eis.push(initial_matcher_pos(ms.to_owned(), None, rdr.peek().sp.lo));

    loop {
        let mut bb_eis = ~[]; // black-box parsed by parser.rs
        let mut next_eis = ~[]; // or proceed normally
        let mut eof_eis = ~[];

        let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();

        /* we append new items to this while we go */
        while !cur_eis.is_empty() { /* for each Earley Item */
            let ei = cur_eis.pop();

            let idx = ei.idx;
            let len = ei.elts.len();

            /* at end of sequence */
            if idx >= len {
                // can't move out of `match`es, so:
                if is_some(&ei.up) {
                    // hack: a matcher sequence is repeating iff it has a
                    // parent (the top level is just a container)

                    // disregard separator, try to go up
                    // (remove this condition to make trailing seps ok)
                    if idx == len {
                        // pop from the matcher position
                        let mut new_pos = copy_up(&ei.up);

                        // update matches (the MBE "parse tree") by appending
                        // each tree as a subtree.

                        // I bet this is a perf problem: we're preemptively
                        // doing a lot of array work that will get thrown away
                        // most of the time.

                        // Only touch the binders we have actually bound
                        for uint::range(ei.match_lo, ei.match_hi) |idx| {
                            let sub = ei.matches[idx].clone();
                            new_pos.matches[idx]
                                   .push(@matched_seq(sub,
                                                      mk_sp(ei.sp_lo,
                                                            sp.hi)));
                        }

                        new_pos.idx += 1;
                        cur_eis.push(new_pos);
                    }

                    // can we go around again?

                    // the *_t vars are workarounds for the lack of unary move
                    match ei.sep {
                        Some(ref t) if idx == len => { // we need a separator
                            if tok == (*t) { // pass the separator
                                let mut ei_t = ei.clone();
                                ei_t.idx += 1;
                                next_eis.push(ei_t);
                            }
                        }
                        _ => { // we don't need a separator
                            let mut ei_t = ei;
                            ei_t.idx = 0;
                            cur_eis.push(ei_t);
                        }
                    }
                } else {
                    eof_eis.push(ei);
                }
            } else {
                match ei.elts[idx].node.clone() {
                    /* need to descend into sequence */
                    match_seq(ref matchers, ref sep, zero_ok,
                              match_idx_lo, match_idx_hi) => {
                        if zero_ok {
                            let mut new_ei = ei.clone();
                            new_ei.idx += 1u;
                            // we specifically matched zero repeats.
                            for uint::range(match_idx_lo, match_idx_hi) |idx| {
                                new_ei.matches[idx].push(@matched_seq(~[], sp));
                            }
                            cur_eis.push(new_ei);
                        }

                        let matches = vec::from_elem(ei.matches.len(), ~[]);
                        let ei_t = ei;
                        cur_eis.push(~MatcherPos {
                            elts: (*matchers).clone(),
                            sep: (*sep).clone(),
                            idx: 0u,
                            up: matcher_pos_up(Some(ei_t)),
                            matches: matches,
                            match_lo: match_idx_lo, match_hi: match_idx_hi,
                            sp_lo: sp.lo
                        });
                    }
                    match_nonterminal(_,_,_) => { bb_eis.push(ei) }
                    match_tok(ref t) => {
                        let mut ei_t = ei.clone();
                        if (*t) == tok {
                            ei_t.idx += 1;
                            next_eis.push(ei_t);
                        }
                    }
                }
            }
        }

        /* error messages here could be improved with links to orig. rules */
        if tok == EOF {
            if eof_eis.len() == 1u {
                let mut v = ~[];
                for eof_eis[0u].matches.mut_iter().advance |dv| {
                    v.push(dv.pop());
                }
                return success(nameize(sess, ms, v));
            } else if eof_eis.len() > 1u {
                return error(sp, ~"Ambiguity: multiple successful parses");
            } else {
                return failure(sp, ~"Unexpected end of macro invocation");
            }
        } else {
            if (bb_eis.len() > 0u && next_eis.len() > 0u)
                || bb_eis.len() > 1u {
                let nts = bb_eis.map(|ei| {
                    match ei.elts[ei.idx].node {
                        match_nonterminal(ref bind, ref name, _) => {
                            fmt!("%s ('%s')", ident_to_str(name),
                                 ident_to_str(bind))
                        }
                        _ => fail!()
                    }
                }).connect(" or ");
                return error(sp, fmt!(
                    "Local ambiguity: multiple parsing options: \
                     built-in NTs %s or %u other options.",
                    nts, next_eis.len()));
            } else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
                return failure(sp, ~"No rules expected the token: "
                               + to_str(get_ident_interner(), &tok));
            } else if (next_eis.len() > 0u) {
                /* Now process the next token */
                while (next_eis.len() > 0u) {
                    cur_eis.push(next_eis.pop());
                }
                rdr.next_token();
            } else /* bb_eis.len() == 1 */ {
                let rust_parser = Parser(sess, cfg.clone(), rdr.dup());

                let mut ei = bb_eis.pop();
                match ei.elts[ei.idx].node {
                    match_nonterminal(_, ref name, idx) => {
                        ei.matches[idx].push(@matched_nonterminal(
                            parse_nt(&rust_parser, ident_to_str(name))));
                        ei.idx += 1u;
                    }
                    _ => fail!()
                }
                cur_eis.push(ei);

                for rust_parser.tokens_consumed.times() || {
                    rdr.next_token();
                }
            }
        }

        assert!(cur_eis.len() > 0u);
    }
}

pub fn parse_nt(p: &Parser, name: &str) -> nonterminal {
    match name {
        "item" => match p.parse_item(~[]) {
            Some(i) => token::nt_item(i),
            None => p.fatal("expected an item keyword")
        },
        "block" => token::nt_block(p.parse_block()),
        "stmt" => token::nt_stmt(p.parse_stmt(~[])),
        "pat" => token::nt_pat(p.parse_pat()),
        "expr" => token::nt_expr(p.parse_expr()),
        "ty" => token::nt_ty(p.parse_ty(false /* no need to disambiguate */)),
        // this could be handled like a token, since it is one
        "ident" => match *p.token {
            token::IDENT(sn, b) => { p.bump(); token::nt_ident(sn, b) }
            _ => p.fatal(~"expected ident, found "
                         + token::to_str(get_ident_interner(), p.token))
        },
        "path" => token::nt_path(p.parse_path_with_tps(false)),
        "tt" => {
            *p.quote_depth += 1u; // but in theory, non-quoted tts might be useful
            let res = token::nt_tt(@p.parse_token_tree());
            *p.quote_depth -= 1u;
            res
        }
        "matchers" => token::nt_matchers(p.parse_matchers()),
        _ => p.fatal(~"Unsupported builtin nonterminal parser: " + name)
    }
}