use rustc::lint::*;
use syntax::ast;
use syntax::codemap::{Span, BytePos};
use utils::span_lint;

/// **What it does:** Checks for the presence of `_`, `::` or camel-case words
/// outside ticks in documentation.
///
/// **Why is this bad?** *Rustdoc* supports markdown formatting; `_`, `::` and
/// camel-case probably indicate some code which should be included between
/// ticks. `_` can also be used for emphasis in markdown, so this lint tries to
/// take that into account.
///
/// **Known problems:** Lots of bad docs won’t be fixed; what the lint checks
/// for is limited, and there are still false positives.
///
/// **Examples:**
/// ```rust
/// /// Do something with the foo_bar parameter. See also that::other::module::foo.
/// // ^ `foo_bar` and `that::other::module::foo` should be ticked.
/// fn doit(foo_bar) { .. }
/// ```
declare_lint! {
    pub DOC_MARKDOWN,
    Warn,
    "presence of `_`, `::` or camel-case outside backticks in documentation"
}

#[derive(Clone)]
pub struct Doc {
    valid_idents: Vec<String>,
}

impl Doc {
    pub fn new(valid_idents: Vec<String>) -> Self {
        Doc { valid_idents: valid_idents }
    }
}
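
// The pass is constructed with the list of identifiers that are allowed to
// appear outside backticks. A minimal registration sketch, assuming a plugin
// `Registry` named `reg` and a parsed configuration `conf` exposing
// `doc_valid_idents` (illustrative names, not necessarily the exact `lib.rs` code):
//
//     reg.register_early_lint_pass(box Doc::new(conf.doc_valid_idents.clone()));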
|
2016-03-19 11:59:12 -05:00
|
|
|
|
|
|
|
|
|
impl LintPass for Doc {
|
|
|
|
|
fn get_lints(&self) -> LintArray {
|
|
|
|
|
lint_array![DOC_MARKDOWN]
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl EarlyLintPass for Doc {
|
|
|
|
|
fn check_crate(&mut self, cx: &EarlyContext, krate: &ast::Crate) {
|
2016-05-02 07:36:33 -05:00
|
|
|
|
check_attrs(cx, &self.valid_idents, &krate.attrs);
|
2016-03-19 11:59:12 -05:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn check_item(&mut self, cx: &EarlyContext, item: &ast::Item) {
|
2016-05-02 07:36:33 -05:00
|
|
|
|
check_attrs(cx, &self.valid_idents, &item.attrs);
|
2016-03-19 11:59:12 -05:00
|
|
|
|
}
|
|
|
|
|
}

/// Clean up documentation decoration (`///` and such).
///
/// We can't use `syntax::attr::AttributeMethods::with_desugared_doc` or
/// `syntax::parse::lexer::comments::strip_doc_comment_decoration` because we need to keep track of
/// the span, but this function is inspired by the latter.
#[allow(cast_possible_truncation)]
pub fn strip_doc_comment_decoration((comment, span): (String, Span)) -> Vec<(String, Span)> {
    // one-line comments lose their prefix
    const ONELINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
    for prefix in ONELINERS {
        if comment.starts_with(*prefix) {
            return vec![(comment[prefix.len()..].to_owned(),
                         Span { lo: span.lo + BytePos(prefix.len() as u32), ..span })];
        }
    }

    if comment.starts_with("/*") {
        return comment[3..comment.len() - 2]
            .lines()
            .map(|line| {
                let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
                debug_assert_eq!(offset as u32 as usize, offset);

                (line.to_owned(), Span { lo: span.lo + BytePos(offset as u32), ..span })
            })
            .collect();
    }

    panic!("not a doc-comment: {}", comment);
}
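
// For example (spans elided for brevity): `"/// Foo"` yields a single entry
// `" Foo"` whose span starts three bytes into the original comment, while a
// block doc comment such as `"/** Foo\n    Bar */"` yields one `(line, span)`
// pair per line, each offset to where that line starts inside the comment.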

pub fn check_attrs<'a>(cx: &EarlyContext, valid_idents: &[String], attrs: &'a [ast::Attribute]) {
    let mut docs = vec![];

    for attr in attrs {
        if attr.is_sugared_doc {
            if let Some(ref doc) = attr.value_str() {
                let doc = (*doc.as_str()).to_owned();
                docs.extend_from_slice(&strip_doc_comment_decoration((doc, attr.span)));
            }
        }
    }

    if !docs.is_empty() {
        let _ = check_doc(cx, valid_idents, &docs);
    }
}
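
// Note: a sugared doc comment such as `/// Foo` reaches the lint as a `doc`
// attribute whose string value is the raw comment text, decoration included,
// which is why `check_attrs` runs it back through
// `strip_doc_comment_decoration` before the markdown checks.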

#[allow(while_let_loop)] // #362
fn check_doc(cx: &EarlyContext, valid_idents: &[String], docs: &[(String, Span)]) -> Result<(), ()> {
    // In markdown, `_` can be used to emphasize something, or it can be a raw `_`,
    // depending on context. There really is no markdown specification that would
    // disambiguate this properly. This is what GitHub and Rustdoc do:
    //
    // foo_bar test_quz → foo_bar test_quz
    // foo_bar_baz → foo_bar_baz (note that the “official” spec says this should be emphasized)
    // _foo bar_ test_quz_ → <em>foo bar</em> test_quz_
    // \_foo bar\_ → _foo bar_
    // (_baz_) → (<em>baz</em>)
    // foo _ bar _ baz → foo _ bar _ baz

    /// Character that can appear in a path
    fn is_path_char(c: char) -> bool {
        match c {
            t if t.is_alphanumeric() => true,
            ':' | '_' => true,
            _ => false,
        }
    }
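
    // e.g. every character of `foo_bar` and `that::other::module::foo` is a
    // path character, so a whole path is later collected as a single word
    // before being checked.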

    /// This type is used to iterate through the documentation characters, keeping
    /// track of the current span at the same time.
    #[derive(Clone, Debug)]
    struct Parser<'a> {
        /// First byte of the current potential match
        current_word_begin: usize,
        /// List of lines and their associated span
        docs: &'a [(String, Span)],
        /// Index of the current line we are parsing
        line: usize,
        /// Whether we are in a link
        link: bool,
        /// Whether we are at the beginning of a line
        new_line: bool,
        /// Whether we were at the end of a line the last time `next` was called
        reset: bool,
        /// The position of the current character within the current line
        pos: usize,
    }

    impl<'a> Parser<'a> {
        fn advance_begin(&mut self) {
            self.current_word_begin = self.pos;
        }

        fn line(&self) -> (&'a str, Span) {
            let (ref doc, span) = self.docs[self.line];
            (doc, span)
        }

        fn peek(&self) -> Option<char> {
            self.line().0[self.pos..].chars().next()
        }

        #[allow(while_let_on_iterator)] // borrowck complains about for
        fn jump_to(&mut self, n: char) -> Result<bool, ()> {
            while let Some((new_line, c)) = self.next() {
                if c == n {
                    self.advance_begin();
                    return Ok(new_line);
                }
            }

            Err(())
        }

        fn next_line(&mut self) {
            self.pos = 0;
            self.current_word_begin = 0;
            self.line += 1;
            self.new_line = true;
        }

        fn put_back(&mut self, c: char) {
            self.pos -= c.len_utf8();
        }

        #[allow(cast_possible_truncation)]
        fn word(&self) -> (&'a str, Span) {
            let begin = self.current_word_begin;
            let end = self.pos;

            debug_assert_eq!(end as u32 as usize, end);
            debug_assert_eq!(begin as u32 as usize, begin);

            let (doc, mut span) = self.line();
            span.hi = span.lo + BytePos(end as u32);
            span.lo = span.lo + BytePos(begin as u32);

            (&doc[begin..end], span)
        }
    }

    impl<'a> Iterator for Parser<'a> {
        type Item = (bool, char);
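
        /// Yields `(new_line, c)` pairs, where `new_line` is true as long as
        /// only whitespace has been seen before `c` on the current line; a
        /// `'\n'` is synthesized between consecutive doc lines.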
        fn next(&mut self) -> Option<(bool, char)> {
            while self.line < self.docs.len() {
                if self.reset {
                    self.line += 1;
                    self.reset = false;
                    self.pos = 0;
                    self.current_word_begin = 0;
                }

                let mut chars = self.line().0[self.pos..].chars();
                let c = chars.next();

                if let Some(c) = c {
                    self.pos += c.len_utf8();
                    let new_line = self.new_line;
                    self.new_line = c == '\n' || (self.new_line && c.is_whitespace());
                    return Some((new_line, c));
                } else if self.line == self.docs.len() - 1 {
                    return None;
                } else {
                    self.new_line = true;
                    self.reset = true;
                    self.pos += 1;
                    return Some((true, '\n'));
                }
            }

            None
        }
    }

    let mut parser = Parser {
        current_word_begin: 0,
        docs: docs,
        line: 0,
        link: false,
        new_line: true,
        reset: false,
        pos: 0,
    };

    /// Check for a fenced code block.
    macro_rules! check_block {
        ($parser:expr, $c:tt, $new_line:expr) => {{
            check_block!($parser, $c, $c, $new_line)
        }};

        ($parser:expr, $c:pat, $c_expr:expr, $new_line:expr) => {{
            fn check_block(parser: &mut Parser, new_line: bool) -> Result<bool, ()> {
                if new_line {
                    let mut lookup_parser = parser.clone();
                    if let (Some((false, $c)), Some((false, $c))) = (lookup_parser.next(), lookup_parser.next()) {
                        *parser = lookup_parser;
                        // 3 or more ` or ~ open a code block, to be closed with the same number of ` or ~
                        let mut open_count = 3;
                        while let Some((false, $c)) = parser.next() {
                            open_count += 1;
                        }

                        loop {
                            loop {
                                if try!(parser.jump_to($c_expr)) {
                                    break;
                                }
                            }

                            lookup_parser = parser.clone();
                            let a = lookup_parser.next();
                            let b = lookup_parser.next();
                            if let (Some((false, $c)), Some((false, $c))) = (a, b) {
                                let mut close_count = 3;
                                while let Some((false, $c)) = lookup_parser.next() {
                                    close_count += 1;
                                }

                                if close_count == open_count {
                                    *parser = lookup_parser;
                                    return Ok(true);
                                }
                            }
                        }
                    }
                }

                Ok(false)
            }

            check_block(&mut $parser, $new_line)
        }};
    }
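
    // For example, everything inside the following doc fragment is skipped,
    // because the closing fence repeats the opening character the same number
    // of times (backticks and tildes are handled alike):
    //
    //     ~~~~
    //     let foo_bar = that::other::module::foo();  // not linted
    //     ~~~~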

    loop {
        match parser.next() {
            Some((new_line, c)) => {
                match c {
                    '#' if new_line => {
                        // don’t warn on titles
                        parser.next_line();
                    },
                    '`' => {
                        if try!(check_block!(parser, '`', new_line)) {
                            continue;
                        }

                        // not a code block, just inline code
                        try!(parser.jump_to('`'));
                    },
                    '~' => {
                        if try!(check_block!(parser, '~', new_line)) {
                            continue;
                        }

                        // a single `~` does not introduce inline code, and two of them introduce
                        // strikethrough, which we don't care about here
                    },
                    '[' => {
                        // Check for a reference definition `[foo]:` at the beginning of a line
                        let mut link = true;

                        if new_line {
                            let mut lookup_parser = parser.clone();
                            if lookup_parser.any(|(_, c)| c == ']') {
                                if let Some((_, ':')) = lookup_parser.next() {
                                    lookup_parser.next_line();
                                    parser = lookup_parser;
                                    link = false;
                                }
                            }
                        }

                        parser.advance_begin();
                        parser.link = link;
                    },
                    ']' if parser.link => {
                        parser.link = false;

                        match parser.peek() {
                            Some('(') => {
                                try!(parser.jump_to(')'));
                            },
                            Some('[') => {
                                try!(parser.jump_to(']'));
                            },
                            Some(_) => continue,
                            None => return Err(()),
                        }
                    },
                    c if !is_path_char(c) => {
                        parser.advance_begin();
                    },
                    _ => {
                        if let Some((_, c)) = parser.find(|&(_, c)| !is_path_char(c)) {
                            parser.put_back(c);
                        }

                        let (word, span) = parser.word();
                        check_word(cx, valid_idents, word, span);
                        parser.advance_begin();
                    },
                }
            },
            None => break,
        }
    }

    Ok(())
}

fn check_word(cx: &EarlyContext, valid_idents: &[String], word: &str, span: Span) {
    /// Checks if a string is camel-case, i.e. contains at least two uppercase letters
    /// (`Clippy` is fine) and at least one lower-case letter (`NASA` is fine).
    /// Plurals are also excluded (`IDs` is fine).
    fn is_camel_case(s: &str) -> bool {
        if s.starts_with(|c: char| c.is_digit(10)) {
            return false;
        }

        let s = if s.ends_with('s') {
            &s[..s.len() - 1]
        } else {
            s
        };

        s.chars().all(char::is_alphanumeric) && s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1 &&
        s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0
    }
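
    // e.g. `FooBar` and `EarlyContext` count as camel-case, while `Clippy`
    // (only one uppercase letter), `NASA` (no lower-case letter) and `IDs`
    // (plural of an acronym) do not.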

    fn has_underscore(s: &str) -> bool {
        s != "_" && !s.contains("\\_") && s.contains('_')
    }
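
    // e.g. `foo_bar` is flagged, while a lone `_` or an escaped `foo\_bar` is not.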

    // Trim punctuation as in `some comment (see foo::bar).`
    //                                                   ^^
    // Or even as in `_foo bar_` which is emphasized.
    let word = word.trim_matches(|c: char| !c.is_alphanumeric());

    if valid_idents.iter().any(|i| i == word) {
        return;
    }

    if has_underscore(word) || word.contains("::") || is_camel_case(word) {
        span_lint(cx,
                  DOC_MARKDOWN,
                  span,
                  &format!("you should put `{}` between ticks in the documentation", word));
    }
}