// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Basic syntax highlighting functionality.
//!
//! This module uses libsyntax's lexer to provide token-based highlighting for
//! the HTML documentation generated by rustdoc.
//!
//! If you just want syntax highlighting for a Rust program, then you can use
//! the `render_inner_with_highlighting` or `render_with_highlighting`
//! functions. For more advanced use cases (if you want to supply your own CSS
//! classes, control how the HTML is generated, or even generate something
//! other than HTML), then you should implement the `Writer` trait and use a
//! `Classifier`.
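//!
//! A minimal usage sketch (illustrative only; not run as a doctest, and it
//! assumes the caller is inside librustdoc, where this module lives at
//! `html::highlight`):
//!
//! ```rust,ignore
//! use html::highlight::render_with_highlighting;
//!
//! // Highlight a snippet with no extra CSS class and no `id` attribute on
//! // the generated `<pre>` block.
//! let html = render_with_highlighting("fn main() {}", None, None);
//! assert!(html.starts_with("<pre"));
//! ```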

use html::escape::Escape;

use std::fmt::Display;
use std::io;
use std::io::prelude::*;

use syntax::codemap::CodeMap;
use syntax::parse::lexer::{self, Reader, TokenAndSpan};
use syntax::parse::token;
use syntax::parse;
use syntax_pos::Span;

/// Highlights `src`, returning the HTML output.
pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>) -> String {
    debug!("highlighting: ================\n{}\n==============", src);
    let sess = parse::ParseSess::new();
    let fm = sess.codemap().new_filemap("<stdin>".to_string(), None, src.to_string());

    let mut out = Vec::new();
    write_header(class, id, &mut out).unwrap();

    let mut classifier = Classifier::new(lexer::StringReader::new(&sess.span_diagnostic, fm),
                                         sess.codemap());
    if let Err(_) = classifier.write_source(&mut out) {
        return format!("<pre>{}</pre>", src);
    }

    write_footer(&mut out).unwrap();
    String::from_utf8_lossy(&out[..]).into_owned()
}

/// Highlights `src`, returning the HTML output. Returns only the inner HTML to
/// be inserted into an element. Cf. `render_with_highlighting`, which includes
/// an enclosing `<pre>` block.
pub fn render_inner_with_highlighting(src: &str) -> io::Result<String> {
    let sess = parse::ParseSess::new();
    let fm = sess.codemap().new_filemap("<stdin>".to_string(), None, src.to_string());

    let mut out = Vec::new();
    let mut classifier = Classifier::new(lexer::StringReader::new(&sess.span_diagnostic, fm),
                                         sess.codemap());
    classifier.write_source(&mut out)?;

    Ok(String::from_utf8_lossy(&out).into_owned())
}

/// Processes a program (nested in the internal `lexer`), classifying strings of
/// text by highlighting category (`Class`). Calls out to a `Writer` to write
/// each span of text in sequence.
pub struct Classifier<'a> {
    lexer: lexer::StringReader<'a>,
    codemap: &'a CodeMap,

    // State of the classifier.
    in_attribute: bool,
    in_macro: bool,
    in_macro_nonterminal: bool,
}

/// How a span of text is classified. Mostly corresponds to token kinds.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Class {
    None,
    Comment,
    DocComment,
    Attribute,
    KeyWord,
    // Keywords that do pointer/reference stuff.
    RefKeyWord,
    Self_,
    Op,
    Macro,
    MacroNonTerminal,
    String,
    Number,
    Bool,
    Ident,
    Lifetime,
    PreludeTy,
    PreludeVal,
}

/// Trait that controls writing the output of syntax highlighting. Users should
/// implement this trait to customise writing output.
///
/// The classifier will call into the `Writer` implementation as it finds spans
/// of text to highlight. Exactly how that text should be highlighted is up to
/// the implementation.
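///
/// A minimal sketch of a custom `Writer` (illustrative only; not run as a
/// doctest, and it assumes this module's imports — `Class`, `Display`, `io`,
/// `TokenAndSpan` — are in scope). The hypothetical `PlainText` writer below
/// ignores highlighting classes and just accumulates the unformatted text.
///
/// ```rust,ignore
/// struct PlainText(String);
///
/// impl Writer for PlainText {
///     // Spans carry no markup in this writer, so entering and leaving one
///     // is a no-op.
///     fn enter_span(&mut self, _klass: Class) -> io::Result<()> {
///         Ok(())
///     }
///
///     fn exit_span(&mut self) -> io::Result<()> {
///         Ok(())
///     }
///
///     // Append every piece of text verbatim, regardless of its `Class`.
///     fn string<T: Display>(&mut self,
///                           text: T,
///                           _klass: Class,
///                           _tas: Option<&TokenAndSpan>)
///                           -> io::Result<()> {
///         self.0.push_str(&text.to_string());
///         Ok(())
///     }
/// }
/// ```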
pub trait Writer {
    /// Called when we start processing a span of text that should be highlighted.
    /// The `Class` argument specifies how it should be highlighted.
    fn enter_span(&mut self, Class) -> io::Result<()>;

    /// Called at the end of a span of highlighted text.
    fn exit_span(&mut self) -> io::Result<()>;

    /// Called for a span of text, usually, but not always, a single token. If
    /// the string of text (`T`) does correspond to a token, then the token will
    /// also be passed. If the text should be highlighted differently from the
    /// surrounding text, then the `Class` argument will be a value other than
    /// `None`.
    ///
    /// The following sequences of callbacks are equivalent:
    /// ```plain
    ///     enter_span(Foo), string("text", None), exit_span()
    ///     string("text", Foo)
    /// ```
    /// The latter can be thought of as a shorthand for the former, which is
    /// more flexible.
    fn string<T: Display>(&mut self, T, Class, Option<&TokenAndSpan>) -> io::Result<()>;
}

// Implement `Writer` for anything that can be written to; this just implements
// the default rustdoc behaviour.
impl<U: Write> Writer for U {
    fn string<T: Display>(&mut self,
                          text: T,
                          klass: Class,
                          _tas: Option<&TokenAndSpan>)
                          -> io::Result<()> {
        match klass {
            Class::None => write!(self, "{}", text),
            klass => write!(self, "<span class='{}'>{}</span>", klass.rustdoc_class(), text),
        }
    }

    fn enter_span(&mut self, klass: Class) -> io::Result<()> {
        write!(self, "<span class='{}'>", klass.rustdoc_class())
    }

    fn exit_span(&mut self) -> io::Result<()> {
        write!(self, "</span>")
    }
}

impl<'a> Classifier<'a> {
    pub fn new(lexer: lexer::StringReader<'a>, codemap: &'a CodeMap) -> Classifier<'a> {
        Classifier {
            lexer: lexer,
            codemap: codemap,
            in_attribute: false,
            in_macro: false,
            in_macro_nonterminal: false,
        }
    }

    /// Exhausts the `lexer`, writing the output into `out`.
    ///
    /// The general structure for this method is to iterate over each token,
    /// possibly giving it an HTML span with a class specifying what flavor of token
    /// is used. All source code emission is done as slices from the source map,
    /// not from the tokens themselves, in order to stay true to the original
    /// source.
    pub fn write_source<W: Writer>(&mut self,
                                   out: &mut W)
                                   -> io::Result<()> {
        loop {
            let next = match self.lexer.try_next_token() {
                Ok(tas) => tas,
                Err(_) => {
                    self.lexer.emit_fatal_errors();
                    self.lexer.span_diagnostic.struct_warn("Backing out of syntax highlighting")
                                              .note("You probably did not intend to render this \
                                                     as a rust code-block")
                                              .emit();
                    return Err(io::Error::new(io::ErrorKind::Other, ""));
                }
            };

            if next.tok == token::Eof {
                break;
            }

            self.write_token(out, next)?;
        }

        Ok(())
    }

    // Handles an individual token from the lexer.
    fn write_token<W: Writer>(&mut self,
                              out: &mut W,
                              tas: TokenAndSpan)
                              -> io::Result<()> {
        let klass = match tas.tok {
            token::Shebang(s) => {
                out.string(Escape(&s.as_str()), Class::None, Some(&tas))?;
                return Ok(());
            },

            token::Whitespace => Class::None,
            token::Comment => Class::Comment,
            token::DocComment(..) => Class::DocComment,

            // If this '&' token is directly adjacent to another token, assume
            // that it's the address-of operator instead of the and-operator.
            token::BinOp(token::And) if self.lexer.peek().sp.lo == tas.sp.hi => Class::RefKeyWord,

            // Consider this as part of a macro invocation if there was a
            // leading identifier.
            token::Not if self.in_macro => {
                self.in_macro = false;
                Class::Macro
            }

            // Operators.
            token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt |
                token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow |
                token::BinOpEq(..) | token::FatArrow => Class::Op,

            // Miscellaneous, no highlighting.
            token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi |
                token::Colon | token::ModSep | token::LArrow | token::OpenDelim(_) |
                token::CloseDelim(token::Brace) | token::CloseDelim(token::Paren) |
                token::Question => Class::None,
            token::Dollar => {
                if self.lexer.peek().tok.is_ident() {
                    self.in_macro_nonterminal = true;
                    Class::MacroNonTerminal
                } else {
                    Class::None
                }
            }

            // This is the start of an attribute. We're going to want to
            // continue highlighting it as an attribute until the ending ']' is
            // seen, so skip out early. Down below we terminate the attribute
            // span when we see the ']'.
            token::Pound => {
                self.in_attribute = true;
                out.enter_span(Class::Attribute)?;
                out.string("#", Class::None, None)?;
                return Ok(());
            }
            token::CloseDelim(token::Bracket) => {
                if self.in_attribute {
                    self.in_attribute = false;
                    out.string("]", Class::None, None)?;
                    out.exit_span()?;
                    return Ok(());
                } else {
                    Class::None
                }
            }

            token::Literal(lit, _suf) => {
                match lit {
                    // Text literals.
                    token::Byte(..) | token::Char(..) |
                        token::ByteStr(..) | token::ByteStrRaw(..) |
                        token::Str_(..) | token::StrRaw(..) => Class::String,

                    // Number literals.
                    token::Integer(..) | token::Float(..) => Class::Number,
                }
            }

            // Keywords are also included in the identifier set.
            token::Ident(ident) => {
                match &*ident.name.as_str() {
                    "ref" | "mut" => Class::RefKeyWord,

                    "self" | "Self" => Class::Self_,
                    "false" | "true" => Class::Bool,

                    "Option" | "Result" => Class::PreludeTy,
                    "Some" | "None" | "Ok" | "Err" => Class::PreludeVal,

                    _ if tas.tok.is_any_keyword() => Class::KeyWord,
                    _ => {
                        if self.in_macro_nonterminal {
                            self.in_macro_nonterminal = false;
                            Class::MacroNonTerminal
                        } else if self.lexer.peek().tok == token::Not {
                            self.in_macro = true;
                            Class::Macro
                        } else {
                            Class::Ident
                        }
                    }
                }
            }

            // Special macro vars are like keywords.
            token::SpecialVarNt(_) => Class::KeyWord,

            token::Lifetime(..) => Class::Lifetime,

            token::Underscore | token::Eof | token::Interpolated(..) |
                token::MatchNt(..) | token::SubstNt(..) | token::Tilde | token::At => Class::None,
        };

        // Anything that didn't return above is the simple case where the class
        // just spans a single token, so we can use the `string` method.
        out.string(Escape(&self.snip(tas.sp)), klass, Some(&tas))
    }

    // Helper function to get a snippet from the codemap.
    fn snip(&self, sp: Span) -> String {
        self.codemap.span_to_snippet(sp).unwrap()
    }
}

impl Class {
    /// Returns the CSS class expected by rustdoc for each `Class`.
    pub fn rustdoc_class(self) -> &'static str {
        match self {
            Class::None => "",
            Class::Comment => "comment",
            Class::DocComment => "doccomment",
            Class::Attribute => "attribute",
            Class::KeyWord => "kw",
            Class::RefKeyWord => "kw-2",
            Class::Self_ => "self",
            Class::Op => "op",
            Class::Macro => "macro",
            Class::MacroNonTerminal => "macro-nonterminal",
            Class::String => "string",
            Class::Number => "number",
            Class::Bool => "bool-val",
            Class::Ident => "ident",
            Class::Lifetime => "lifetime",
            Class::PreludeTy => "prelude-ty",
            Class::PreludeVal => "prelude-val",
        }
    }
}

fn write_header(class: Option<&str>,
                id: Option<&str>,
                out: &mut Write)
                -> io::Result<()> {
    write!(out, "<pre ")?;
    match id {
        Some(id) => write!(out, "id='{}' ", id)?,
        None => {}
    }
    write!(out, "class='rust {}'>\n", class.unwrap_or(""))
}

fn write_footer(out: &mut Write) -> io::Result<()> {
    write!(out, "</pre>\n")
}