// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
|
|
|
//! Basic HTML highlighting functionality.
//!
//! This module uses libsyntax's lexer to provide token-based highlighting for
//! the HTML documentation generated by rustdoc.
|
use html::escape::Escape;
|
|
|
|
|
2014-08-18 08:29:44 -07:00
|
|
|
use std::io;
|
|
|
|
use syntax::parse::lexer;
|
|
|
|
use syntax::parse::token as t;
|
|
|
|
use syntax::parse;
|
2014-02-20 01:14:51 -08:00
|
|
|
|
|
|
|
/// Highlights some source code, returning the HTML output.
|
2014-06-06 09:12:18 -07:00
|
|
|
pub fn highlight(src: &str, class: Option<&str>, id: Option<&str>) -> String {
|
2014-05-10 17:39:08 -07:00
|
|
|
debug!("highlighting: ================\n{}\n==============", src);
|
2014-02-20 01:14:51 -08:00
|
|
|
let sess = parse::new_parse_sess();
|
2014-05-07 16:33:43 -07:00
|
|
|
let fm = parse::string_to_filemap(&sess,
|
2014-05-25 03:17:19 -07:00
|
|
|
src.to_string(),
|
|
|
|
"<stdin>".to_string());
|
2014-02-20 01:14:51 -08:00
|
|
|
|
|
|
|
let mut out = io::MemWriter::new();
|
2014-03-09 16:54:34 +02:00
|
|
|
doit(&sess,
|
2014-05-21 16:57:31 -07:00
|
|
|
lexer::StringReader::new(&sess.span_diagnostic, fm),
|
2014-03-02 13:30:28 +11:00
|
|
|
class,
|
2014-06-06 09:12:18 -07:00
|
|
|
id,
|
2014-02-20 01:14:51 -08:00
|
|
|
&mut out).unwrap();
|
2014-07-10 18:21:16 +02:00
|
|
|
String::from_utf8_lossy(out.unwrap().as_slice()).into_string()
|
2014-02-20 01:14:51 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Exhausts the `lexer` writing the output into `out`.
///
/// The general structure for this method is to iterate over each token,
/// possibly giving it an HTML span with a class specifying what flavor of token
/// it's used. All source code emission is done as slices from the source map,
/// not from the tokens themselves, in order to stay true to the original
/// source.
///
/// * `sess` - parse session whose codemap is used to slice the original
///   source text back out for each token's span.
/// * `lexer` - token stream over the source being highlighted.
/// * `class` - extra CSS class(es) appended after `rust` on the `<pre>` tag.
/// * `id` - optional `id` attribute for the `<pre>` tag.
fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
        class: Option<&str>, id: Option<&str>,
        out: &mut Writer) -> io::IoResult<()> {
    // Bring the `Reader` trait into scope for `next_token`/`peek` below.
    use syntax::parse::lexer::Reader;

    // Open the <pre> wrapper, with the optional id and the requested classes.
    try!(write!(out, "<pre "));
    match id {
        Some(id) => try!(write!(out, "id='{}' ", id)),
        None => {}
    }
    try!(write!(out, "class='rust {}'>\n", class.unwrap_or("")));

    // State carried across tokens:
    // - is_attribute: inside a `#[...]` attribute; everything until the
    //   closing ']' stays in one 'attribute' span.
    // - is_macro: the previous token was an identifier followed by `!`,
    //   i.e. a macro invocation name.
    // - is_macro_nonterminal: a `$` was just seen, so the next identifier is
    //   a macro nonterminal like `$foo`.
    let mut is_attribute = false;
    let mut is_macro = false;
    let mut is_macro_nonterminal = false;
    loop {
        let next = lexer.next_token();

        // Slice the original source text for a span out of the codemap.
        let snip = |sp| sess.span_diagnostic.cm.span_to_snippet(sp).unwrap();

        if next.tok == t::EOF { break }

        // CSS class for this token; "" means "emit with no span". The
        // branches that `continue` have already written their own output.
        let klass = match next.tok {
            t::WS => {
                // Whitespace is passed through verbatim (escaped only).
                try!(write!(out, "{}", Escape(snip(next.sp).as_slice())));
                continue
            },
            t::COMMENT => {
                try!(write!(out, "<span class='comment'>{}</span>",
                            Escape(snip(next.sp).as_slice())));
                continue
            },
            t::SHEBANG(s) => {
                // Shebang lines (e.g. `#!/usr/bin/env ...`) are emitted
                // unstyled from the token's own text.
                try!(write!(out, "{}", Escape(s.as_str())));
                continue
            },
            // If this '&' token is directly adjacent to another token, assume
            // that it's the address-of operator instead of the and-operator.
            // This allows us to give all pointers their own class (`Box` and
            // `@` are below).
            t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
            t::AT | t::TILDE => "kw-2",

            // consider this as part of a macro invocation if there was a
            // leading identifier
            t::NOT if is_macro => { is_macro = false; "macro" }

            // operators
            t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
                t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
                t::BINOPEQ(..) | t::FAT_ARROW => "op",

            // miscellaneous, no highlighting
            t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
                t::COLON | t::MOD_SEP | t::LARROW | t::LPAREN |
                t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE | t::QUESTION => "",
            t::DOLLAR => {
                // `$ident` inside a macro definition is a nonterminal; a bare
                // `$` is left unstyled.
                if t::is_ident(&lexer.peek().tok) {
                    is_macro_nonterminal = true;
                    "macro-nonterminal"
                } else {
                    ""
                }
            }

            // This is the start of an attribute. We're going to want to
            // continue highlighting it as an attribute until the ending ']' is
            // seen, so skip out early. Down below we terminate the attribute
            // span when we see the ']'.
            t::POUND => {
                is_attribute = true;
                try!(write!(out, r"<span class='attribute'>#"));
                continue
            }
            t::RBRACKET => {
                if is_attribute {
                    // Close the attribute span opened at the `#` above.
                    is_attribute = false;
                    try!(write!(out, "]</span>"));
                    continue
                } else {
                    ""
                }
            }

            // text literals
            t::LIT_BYTE(..) | t::LIT_BINARY(..) | t::LIT_BINARY_RAW(..) |
                t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",

            // number literals
            t::LIT_INTEGER(..) | t::LIT_FLOAT(..) => "number",

            // keywords are also included in the identifier set
            t::IDENT(ident, _is_mod_sep) => {
                match t::get_ident(ident).get() {
                    "ref" | "mut" => "kw-2",

                    "self" => "self",
                    "false" | "true" => "boolval",

                    // Prelude types and value constructors get their own
                    // classes so they stand out like keywords do.
                    "Option" | "Result" => "prelude-ty",
                    "Some" | "None" | "Ok" | "Err" => "prelude-val",

                    _ if t::is_any_keyword(&next.tok) => "kw",
                    _ => {
                        if is_macro_nonterminal {
                            // The identifier following a `$` (see DOLLAR).
                            is_macro_nonterminal = false;
                            "macro-nonterminal"
                        } else if lexer.peek().tok == t::NOT {
                            // An identifier immediately followed by `!` is a
                            // macro invocation; the NOT arm above consumes the
                            // flag for the `!` itself.
                            is_macro = true;
                            "macro"
                        } else {
                            "ident"
                        }
                    }
                }
            }

            t::LIFETIME(..) => "lifetime",
            t::DOC_COMMENT(..) => "doccomment",
            t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
        };

        // as mentioned above, use the original source code instead of
        // stringifying this token
        let snip = sess.span_diagnostic.cm.span_to_snippet(next.sp).unwrap();
        if klass == "" {
            // No class: emit the escaped source text with no wrapping span.
            try!(write!(out, "{}", Escape(snip.as_slice())));
        } else {
            try!(write!(out, "<span class='{}'>{}</span>", klass,
                        Escape(snip.as_slice())));
        }
    }

    write!(out, "</pre>\n")
}
|