Merge #226
226: Validate byte literals and byte strings r=aochagavia a=aochagavia
Co-authored-by: Adolfo Ochagavía <aochagavia92@gmail.com>
commit 9aebd9e6ca
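As context for the diff below, here is a minimal sketch (not part of this commit) of how the new validation is expected to surface, modeled on the test helpers added in crates/ra_syntax/src/validation/byte.rs; it assumes SourceFileNode::parse and errors() are used the same way the new tests use them.

    use ra_syntax::SourceFileNode;

    fn main() {
        // A plain ASCII byte literal parses without validation errors.
        let ok = SourceFileNode::parse("const C: u8 = b'a';");
        assert!(ok.errors().len() == 0);

        // A non-ASCII codepoint in a byte literal is now rejected
        // (ByteOutOfRange among the new SyntaxErrorKind variants).
        let bad = SourceFileNode::parse("const C: u8 = b'ん';");
        assert!(bad.errors().len() > 0);
    }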
@@ -372,6 +372,80 @@ pub fn owned(&self) -> BreakExprNode {
impl<'a> BreakExpr<'a> {}

// Byte
#[derive(Debug, Clone, Copy,)]
pub struct ByteNode<R: TreeRoot<RaTypes> = OwnedRoot> {
    pub(crate) syntax: SyntaxNode<R>,
}
pub type Byte<'a> = ByteNode<RefRoot<'a>>;

impl<R1: TreeRoot<RaTypes>, R2: TreeRoot<RaTypes>> PartialEq<ByteNode<R1>> for ByteNode<R2> {
    fn eq(&self, other: &ByteNode<R1>) -> bool { self.syntax == other.syntax }
}
impl<R: TreeRoot<RaTypes>> Eq for ByteNode<R> {}
impl<R: TreeRoot<RaTypes>> Hash for ByteNode<R> {
    fn hash<H: Hasher>(&self, state: &mut H) { self.syntax.hash(state) }
}

impl<'a> AstNode<'a> for Byte<'a> {
    fn cast(syntax: SyntaxNodeRef<'a>) -> Option<Self> {
        match syntax.kind() {
            BYTE => Some(Byte { syntax }),
            _ => None,
        }
    }
    fn syntax(self) -> SyntaxNodeRef<'a> { self.syntax }
}

impl<R: TreeRoot<RaTypes>> ByteNode<R> {
    pub fn borrowed(&self) -> Byte {
        ByteNode { syntax: self.syntax.borrowed() }
    }
    pub fn owned(&self) -> ByteNode {
        ByteNode { syntax: self.syntax.owned() }
    }
}

impl<'a> Byte<'a> {}

// ByteString
#[derive(Debug, Clone, Copy,)]
pub struct ByteStringNode<R: TreeRoot<RaTypes> = OwnedRoot> {
    pub(crate) syntax: SyntaxNode<R>,
}
pub type ByteString<'a> = ByteStringNode<RefRoot<'a>>;

impl<R1: TreeRoot<RaTypes>, R2: TreeRoot<RaTypes>> PartialEq<ByteStringNode<R1>> for ByteStringNode<R2> {
    fn eq(&self, other: &ByteStringNode<R1>) -> bool { self.syntax == other.syntax }
}
impl<R: TreeRoot<RaTypes>> Eq for ByteStringNode<R> {}
impl<R: TreeRoot<RaTypes>> Hash for ByteStringNode<R> {
    fn hash<H: Hasher>(&self, state: &mut H) { self.syntax.hash(state) }
}

impl<'a> AstNode<'a> for ByteString<'a> {
    fn cast(syntax: SyntaxNodeRef<'a>) -> Option<Self> {
        match syntax.kind() {
            BYTE_STRING => Some(ByteString { syntax }),
            _ => None,
        }
    }
    fn syntax(self) -> SyntaxNodeRef<'a> { self.syntax }
}

impl<R: TreeRoot<RaTypes>> ByteStringNode<R> {
    pub fn borrowed(&self) -> ByteString {
        ByteStringNode { syntax: self.syntax.borrowed() }
    }
    pub fn owned(&self) -> ByteStringNode {
        ByteStringNode { syntax: self.syntax.owned() }
    }
}

impl<'a> ByteString<'a> {}

// CallExpr
#[derive(Debug, Clone, Copy,)]
pub struct CallExprNode<R: TreeRoot<RaTypes> = OwnedRoot> {
@@ -134,6 +134,18 @@ pub fn text(&self) -> &SmolStr {
    }
}

impl<'a> Byte<'a> {
    pub fn text(&self) -> &SmolStr {
        &self.syntax().leaf_text().unwrap()
    }
}

impl<'a> ByteString<'a> {
    pub fn text(&self) -> &SmolStr {
        &self.syntax().leaf_text().unwrap()
    }
}

impl<'a> String<'a> {
    pub fn text(&self) -> &SmolStr {
        &self.syntax().leaf_text().unwrap()
@@ -412,6 +412,8 @@ Grammar(
    "RangeExpr": (),
    "BinExpr": (),
    "String": (),
    "Byte": (),
    "ByteString": (),
    "Char": (),
    "Literal": (),
@@ -1,414 +0,0 @@
use self::CharComponentKind::*;
use rowan::{TextRange, TextUnit};

pub fn parse_string_literal(src: &str) -> StringComponentIterator {
    StringComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub struct StringComponent {
    pub range: TextRange,
    pub kind: StringComponentKind,
}

impl StringComponent {
    fn new(range: TextRange, kind: StringComponentKind) -> StringComponent {
        StringComponent { range, kind }
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub enum StringComponentKind {
    IgnoreNewline,
    Char(CharComponentKind),
}

pub struct StringComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for StringComponentIterator<'a> {
    type Item = StringComponent;
    fn next(&mut self) -> Option<StringComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == '"',
                "string literal should start with double quotes"
            );
        }

        if let Some(component) = self.parser.parse_string_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('"') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "string literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}

pub fn parse_char_literal(src: &str) -> CharComponentIterator {
    CharComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub struct CharComponent {
    pub range: TextRange,
    pub kind: CharComponentKind,
}

impl CharComponent {
    fn new(range: TextRange, kind: CharComponentKind) -> CharComponent {
        CharComponent { range, kind }
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub enum CharComponentKind {
    CodePoint,
    AsciiEscape,
    AsciiCodeEscape,
    UnicodeEscape,
}

pub struct CharComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for CharComponentIterator<'a> {
    type Item = CharComponent;
    fn next(&mut self) -> Option<CharComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == '\'',
                "char literal should start with a quote"
            );
        }

        if let Some(component) = self.parser.parse_char_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('\'') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "char literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}

pub struct Parser<'a> {
    src: &'a str,
    pos: usize,
}

impl<'a> Parser<'a> {
    pub fn new(src: &'a str) -> Parser<'a> {
        Parser { src, pos: 0 }
    }

    // Utility methods

    pub fn peek(&self) -> Option<char> {
        if self.pos == self.src.len() {
            return None;
        }

        self.src[self.pos..].chars().next()
    }

    pub fn advance(&mut self) -> char {
        let next = self
            .peek()
            .expect("cannot advance if end of input is reached");
        self.pos += next.len_utf8();
        next
    }

    pub fn skip_whitespace(&mut self) {
        while self.peek().map(|c| c.is_whitespace()) == Some(true) {
            self.advance();
        }
    }

    pub fn get_pos(&self) -> TextUnit {
        (self.pos as u32).into()
    }

    // Char parsing methods

    fn parse_unicode_escape(&mut self, start: TextUnit) -> CharComponent {
        match self.peek() {
            Some('{') => {
                self.advance();

                // Parse anything until we reach `}`
                while let Some(next) = self.peek() {
                    self.advance();
                    if next == '}' {
                        break;
                    }
                }

                let end = self.get_pos();
                CharComponent::new(TextRange::from_to(start, end), UnicodeEscape)
            }
            Some(_) | None => {
                let end = self.get_pos();
                CharComponent::new(TextRange::from_to(start, end), UnicodeEscape)
            }
        }
    }

    fn parse_ascii_code_escape(&mut self, start: TextUnit) -> CharComponent {
        let code_start = self.get_pos();
        while let Some(next) = self.peek() {
            if next == '\'' || (self.get_pos() - code_start == 2.into()) {
                break;
            }

            self.advance();
        }

        let end = self.get_pos();
        CharComponent::new(TextRange::from_to(start, end), AsciiCodeEscape)
    }

    fn parse_escape(&mut self, start: TextUnit) -> CharComponent {
        if self.peek().is_none() {
            return CharComponent::new(TextRange::from_to(start, start), AsciiEscape);
        }

        let next = self.advance();
        let end = self.get_pos();
        let range = TextRange::from_to(start, end);
        match next {
            'x' => self.parse_ascii_code_escape(start),
            'u' => self.parse_unicode_escape(start),
            _ => CharComponent::new(range, AsciiEscape),
        }
    }

    pub fn parse_char_component(&mut self) -> Option<CharComponent> {
        let next = self.peek()?;

        // Ignore character close
        if next == '\'' {
            return None;
        }

        let start = self.get_pos();
        self.advance();

        if next == '\\' {
            Some(self.parse_escape(start))
        } else {
            let end = self.get_pos();
            Some(CharComponent::new(
                TextRange::from_to(start, end),
                CodePoint,
            ))
        }
    }

    pub fn parse_ignore_newline(&mut self, start: TextUnit) -> Option<StringComponent> {
        // In string literals, when a `\` occurs immediately before the newline, the `\`,
        // the newline, and all whitespace at the beginning of the next line are ignored
        match self.peek() {
            Some('\n') | Some('\r') => {
                self.skip_whitespace();
                Some(StringComponent::new(
                    TextRange::from_to(start, self.get_pos()),
                    StringComponentKind::IgnoreNewline,
                ))
            }
            _ => None,
        }
    }

    pub fn parse_string_component(&mut self) -> Option<StringComponent> {
        let next = self.peek()?;

        // Ignore string close
        if next == '"' {
            return None;
        }

        let start = self.get_pos();
        self.advance();

        if next == '\\' {
            // Strings can use `\` to ignore newlines, so we first try to parse one of those
            // before falling back to parsing char escapes
            self.parse_ignore_newline(start).or_else(|| {
                let char_component = self.parse_escape(start);
                Some(StringComponent::new(
                    char_component.range,
                    StringComponentKind::Char(char_component.kind),
                ))
            })
        } else {
            let end = self.get_pos();
            Some(StringComponent::new(
                TextRange::from_to(start, end),
                StringComponentKind::Char(CodePoint),
            ))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn parse(src: &str) -> (bool, Vec<CharComponent>) {
        let component_iterator = &mut super::parse_char_literal(src);
        let components: Vec<_> = component_iterator.collect();
        (component_iterator.has_closing_quote, components)
    }

    fn unclosed_char_component(src: &str) -> CharComponent {
        let (has_closing_quote, components) = parse(src);
        assert!(!has_closing_quote, "char should not have closing quote");
        assert!(components.len() == 1);
        components[0].clone()
    }

    fn closed_char_component(src: &str) -> CharComponent {
        let (has_closing_quote, components) = parse(src);
        assert!(has_closing_quote, "char should have closing quote");
        assert!(
            components.len() == 1,
            "Literal: {}\nComponents: {:#?}",
            src,
            components
        );
        components[0].clone()
    }

    fn closed_char_components(src: &str) -> Vec<CharComponent> {
        let (has_closing_quote, components) = parse(src);
        assert!(has_closing_quote, "char should have closing quote");
        components
    }

    fn range_closed(src: &str) -> TextRange {
        TextRange::from_to(1.into(), (src.len() as u32 - 1).into())
    }

    fn range_unclosed(src: &str) -> TextRange {
        TextRange::from_to(1.into(), (src.len() as u32).into())
    }

    #[test]
    fn test_unicode_escapes() {
        let unicode_escapes = &[r"{DEAD}", "{BEEF}", "{FF}", "{}", ""];
        for escape in unicode_escapes {
            let escape_sequence = format!(r"'\u{}'", escape);
            let component = closed_char_component(&escape_sequence);
            let expected_range = range_closed(&escape_sequence);
            assert_eq!(component.kind, CharComponentKind::UnicodeEscape);
            assert_eq!(component.range, expected_range);
        }
    }

    #[test]
    fn test_unicode_escapes_unclosed() {
        let unicode_escapes = &["{DEAD", "{BEEF", "{FF"];
        for escape in unicode_escapes {
            let escape_sequence = format!(r"'\u{}'", escape);
            let component = unclosed_char_component(&escape_sequence);
            let expected_range = range_unclosed(&escape_sequence);
            assert_eq!(component.kind, CharComponentKind::UnicodeEscape);
            assert_eq!(component.range, expected_range);
        }
    }

    #[test]
    fn test_empty_char() {
        let (has_closing_quote, components) = parse("''");
        assert!(has_closing_quote, "char should have closing quote");
        assert!(components.len() == 0);
    }

    #[test]
    fn test_unclosed_char() {
        let component = unclosed_char_component("'a");
        assert!(component.kind == CodePoint);
        assert!(component.range == TextRange::from_to(1.into(), 2.into()));
    }

    #[test]
    fn test_digit_escapes() {
        let literals = &[r"", r"5", r"55"];

        for literal in literals {
            let lit_text = format!(r"'\x{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == CharComponentKind::AsciiCodeEscape);
            assert!(component.range == range_closed(&lit_text));
        }

        // More than 2 digits starts a new codepoint
        let components = closed_char_components(r"'\x555'");
        assert!(components.len() == 2);
        assert!(components[1].kind == CharComponentKind::CodePoint);
    }

    #[test]
    fn test_ascii_escapes() {
        let literals = &[
            r"\'", "\\\"", // equivalent to \"
            r"\n", r"\r", r"\t", r"\\", r"\0",
        ];

        for literal in literals {
            let lit_text = format!("'{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == CharComponentKind::AsciiEscape);
            assert!(component.range == range_closed(&lit_text));
        }
    }

    #[test]
    fn test_no_escapes() {
        let literals = &['"', 'n', 'r', 't', '0', 'x', 'u'];

        for &literal in literals {
            let lit_text = format!("'{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == CharComponentKind::CodePoint);
            assert!(component.range == range_closed(&lit_text));
        }
    }
}
crates/ra_syntax/src/string_lexing/byte.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
use super::parser::Parser;
use super::CharComponent;

pub fn parse_byte_literal(src: &str) -> ByteComponentIterator {
    ByteComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

pub struct ByteComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for ByteComponentIterator<'a> {
    type Item = CharComponent;
    fn next(&mut self) -> Option<CharComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == 'b',
                "Byte literal should start with a `b`"
            );

            assert!(
                self.parser.advance() == '\'',
                "Byte literal should start with a `b`, followed by a quote"
            );
        }

        if let Some(component) = self.parser.parse_char_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('\'') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "byte literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}
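A hypothetical, crate-internal usage sketch (not part of this commit) of the byte-literal lexer above; parse_byte_literal, CharComponent and has_closing_quote are the API just added, while inspect_byte_literal itself is illustrative only.

    // Inside ra_syntax (the string_lexing module is not public):
    use crate::string_lexing;

    fn inspect_byte_literal(src: &str) {
        // `src` is the full literal text, e.g. r"b'\x7F'", including prefix and quotes.
        let mut components = string_lexing::parse_byte_literal(src);
        for component in &mut components {
            // Each CharComponent reports its kind (CodePoint, AsciiEscape, ...)
            // and the range it covers inside the literal.
            println!("{:?} at {:?}", component.kind, component.range);
        }
        // The iterator records whether a closing quote was seen; the validator
        // turns a missing one into an UnclosedByte error.
        if !components.has_closing_quote {
            println!("missing closing quote");
        }
    }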
crates/ra_syntax/src/string_lexing/byte_string.rs (new file, 51 lines)
@@ -0,0 +1,51 @@
use super::parser::Parser;
use super::StringComponent;

pub fn parse_byte_string_literal(src: &str) -> ByteStringComponentIterator {
    ByteStringComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

pub struct ByteStringComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for ByteStringComponentIterator<'a> {
    type Item = StringComponent;
    fn next(&mut self) -> Option<StringComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == 'b',
                "byte string literal should start with a `b`"
            );

            assert!(
                self.parser.advance() == '"',
                "byte string literal should start with a `b`, followed by double quotes"
            );
        }

        if let Some(component) = self.parser.parse_string_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('"') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "byte string literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}
crates/ra_syntax/src/string_lexing/char.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
use super::parser::Parser;
use super::CharComponent;

pub fn parse_char_literal(src: &str) -> CharComponentIterator {
    CharComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

pub struct CharComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for CharComponentIterator<'a> {
    type Item = CharComponent;
    fn next(&mut self) -> Option<CharComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == '\'',
                "char literal should start with a quote"
            );
        }

        if let Some(component) = self.parser.parse_char_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('\'') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "char literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}

#[cfg(test)]
mod tests {
    use rowan::TextRange;
    use crate::string_lexing::{
        CharComponent,
        CharComponentKind::*,
    };

    fn parse(src: &str) -> (bool, Vec<CharComponent>) {
        let component_iterator = &mut super::parse_char_literal(src);
        let components: Vec<_> = component_iterator.collect();
        (component_iterator.has_closing_quote, components)
    }

    fn unclosed_char_component(src: &str) -> CharComponent {
        let (has_closing_quote, components) = parse(src);
        assert!(!has_closing_quote, "char should not have closing quote");
        assert!(components.len() == 1);
        components[0].clone()
    }

    fn closed_char_component(src: &str) -> CharComponent {
        let (has_closing_quote, components) = parse(src);
        assert!(has_closing_quote, "char should have closing quote");
        assert!(
            components.len() == 1,
            "Literal: {}\nComponents: {:#?}",
            src,
            components
        );
        components[0].clone()
    }

    fn closed_char_components(src: &str) -> Vec<CharComponent> {
        let (has_closing_quote, components) = parse(src);
        assert!(has_closing_quote, "char should have closing quote");
        components
    }

    fn range_closed(src: &str) -> TextRange {
        TextRange::from_to(1.into(), (src.len() as u32 - 1).into())
    }

    fn range_unclosed(src: &str) -> TextRange {
        TextRange::from_to(1.into(), (src.len() as u32).into())
    }

    #[test]
    fn test_unicode_escapes() {
        let unicode_escapes = &[r"{DEAD}", "{BEEF}", "{FF}", "{}", ""];
        for escape in unicode_escapes {
            let escape_sequence = format!(r"'\u{}'", escape);
            let component = closed_char_component(&escape_sequence);
            let expected_range = range_closed(&escape_sequence);
            assert_eq!(component.kind, UnicodeEscape);
            assert_eq!(component.range, expected_range);
        }
    }

    #[test]
    fn test_unicode_escapes_unclosed() {
        let unicode_escapes = &["{DEAD", "{BEEF", "{FF"];
        for escape in unicode_escapes {
            let escape_sequence = format!(r"'\u{}'", escape);
            let component = unclosed_char_component(&escape_sequence);
            let expected_range = range_unclosed(&escape_sequence);
            assert_eq!(component.kind, UnicodeEscape);
            assert_eq!(component.range, expected_range);
        }
    }

    #[test]
    fn test_empty_char() {
        let (has_closing_quote, components) = parse("''");
        assert!(has_closing_quote, "char should have closing quote");
        assert!(components.len() == 0);
    }

    #[test]
    fn test_unclosed_char() {
        let component = unclosed_char_component("'a");
        assert!(component.kind == CodePoint);
        assert!(component.range == TextRange::from_to(1.into(), 2.into()));
    }

    #[test]
    fn test_digit_escapes() {
        let literals = &[r"", r"5", r"55"];

        for literal in literals {
            let lit_text = format!(r"'\x{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == AsciiCodeEscape);
            assert!(component.range == range_closed(&lit_text));
        }

        // More than 2 digits starts a new codepoint
        let components = closed_char_components(r"'\x555'");
        assert!(components.len() == 2);
        assert!(components[1].kind == CodePoint);
    }

    #[test]
    fn test_ascii_escapes() {
        let literals = &[
            r"\'", "\\\"", // equivalent to \"
            r"\n", r"\r", r"\t", r"\\", r"\0",
        ];

        for literal in literals {
            let lit_text = format!("'{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == AsciiEscape);
            assert!(component.range == range_closed(&lit_text));
        }
    }

    #[test]
    fn test_no_escapes() {
        let literals = &['"', 'n', 'r', 't', '0', 'x', 'u'];

        for &literal in literals {
            let lit_text = format!("'{}'", literal);
            let component = closed_char_component(&lit_text);
            assert!(component.kind == CodePoint);
            assert!(component.range == range_closed(&lit_text));
        }
    }
}
crates/ra_syntax/src/string_lexing/mod.rs (new file, 13 lines)
@@ -0,0 +1,13 @@
mod parser;
mod byte;
mod byte_string;
mod char;
mod string;

pub use self::{
    byte::parse_byte_literal,
    byte_string::parse_byte_string_literal,
    char::parse_char_literal,
    parser::{CharComponent, CharComponentKind, StringComponent, StringComponentKind},
    string::parse_string_literal,
};
crates/ra_syntax/src/string_lexing/parser.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
use rowan::{TextRange, TextUnit};

use self::CharComponentKind::*;

pub struct Parser<'a> {
    pub(super) src: &'a str,
    pub(super) pos: usize,
}

impl<'a> Parser<'a> {
    pub fn new(src: &'a str) -> Parser<'a> {
        Parser { src, pos: 0 }
    }

    // Utility methods

    pub fn peek(&self) -> Option<char> {
        if self.pos == self.src.len() {
            return None;
        }

        self.src[self.pos..].chars().next()
    }

    pub fn advance(&mut self) -> char {
        let next = self
            .peek()
            .expect("cannot advance if end of input is reached");
        self.pos += next.len_utf8();
        next
    }

    pub fn skip_whitespace(&mut self) {
        while self.peek().map(|c| c.is_whitespace()) == Some(true) {
            self.advance();
        }
    }

    pub fn get_pos(&self) -> TextUnit {
        (self.pos as u32).into()
    }

    // Char parsing methods

    fn parse_unicode_escape(&mut self, start: TextUnit) -> CharComponent {
        match self.peek() {
            Some('{') => {
                self.advance();

                // Parse anything until we reach `}`
                while let Some(next) = self.peek() {
                    self.advance();
                    if next == '}' {
                        break;
                    }
                }

                let end = self.get_pos();
                CharComponent::new(TextRange::from_to(start, end), UnicodeEscape)
            }
            Some(_) | None => {
                let end = self.get_pos();
                CharComponent::new(TextRange::from_to(start, end), UnicodeEscape)
            }
        }
    }

    fn parse_ascii_code_escape(&mut self, start: TextUnit) -> CharComponent {
        let code_start = self.get_pos();
        while let Some(next) = self.peek() {
            if next == '\'' || (self.get_pos() - code_start == 2.into()) {
                break;
            }

            self.advance();
        }

        let end = self.get_pos();
        CharComponent::new(TextRange::from_to(start, end), AsciiCodeEscape)
    }

    fn parse_escape(&mut self, start: TextUnit) -> CharComponent {
        if self.peek().is_none() {
            return CharComponent::new(TextRange::from_to(start, start), AsciiEscape);
        }

        let next = self.advance();
        let end = self.get_pos();
        let range = TextRange::from_to(start, end);
        match next {
            'x' => self.parse_ascii_code_escape(start),
            'u' => self.parse_unicode_escape(start),
            _ => CharComponent::new(range, AsciiEscape),
        }
    }

    pub fn parse_char_component(&mut self) -> Option<CharComponent> {
        let next = self.peek()?;

        // Ignore character close
        if next == '\'' {
            return None;
        }

        let start = self.get_pos();
        self.advance();

        if next == '\\' {
            Some(self.parse_escape(start))
        } else {
            let end = self.get_pos();
            Some(CharComponent::new(
                TextRange::from_to(start, end),
                CodePoint,
            ))
        }
    }

    pub fn parse_ignore_newline(&mut self, start: TextUnit) -> Option<StringComponent> {
        // In string literals, when a `\` occurs immediately before the newline, the `\`,
        // the newline, and all whitespace at the beginning of the next line are ignored
        match self.peek() {
            Some('\n') | Some('\r') => {
                self.skip_whitespace();
                Some(StringComponent::new(
                    TextRange::from_to(start, self.get_pos()),
                    StringComponentKind::IgnoreNewline,
                ))
            }
            _ => None,
        }
    }

    pub fn parse_string_component(&mut self) -> Option<StringComponent> {
        let next = self.peek()?;

        // Ignore string close
        if next == '"' {
            return None;
        }

        let start = self.get_pos();
        self.advance();

        if next == '\\' {
            // Strings can use `\` to ignore newlines, so we first try to parse one of those
            // before falling back to parsing char escapes
            self.parse_ignore_newline(start).or_else(|| {
                let char_component = self.parse_escape(start);
                Some(StringComponent::new(
                    char_component.range,
                    StringComponentKind::Char(char_component.kind),
                ))
            })
        } else {
            let end = self.get_pos();
            Some(StringComponent::new(
                TextRange::from_to(start, end),
                StringComponentKind::Char(CodePoint),
            ))
        }
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub struct StringComponent {
    pub range: TextRange,
    pub kind: StringComponentKind,
}

impl StringComponent {
    fn new(range: TextRange, kind: StringComponentKind) -> StringComponent {
        StringComponent { range, kind }
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub enum StringComponentKind {
    IgnoreNewline,
    Char(CharComponentKind),
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub struct CharComponent {
    pub range: TextRange,
    pub kind: CharComponentKind,
}

impl CharComponent {
    fn new(range: TextRange, kind: CharComponentKind) -> CharComponent {
        CharComponent { range, kind }
    }
}

#[derive(Debug, Eq, PartialEq, Clone)]
pub enum CharComponentKind {
    CodePoint,
    AsciiEscape,
    AsciiCodeEscape,
    UnicodeEscape,
}
crates/ra_syntax/src/string_lexing/string.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
use super::parser::Parser;
use super::StringComponent;

pub fn parse_string_literal(src: &str) -> StringComponentIterator {
    StringComponentIterator {
        parser: Parser::new(src),
        has_closing_quote: false,
    }
}

pub struct StringComponentIterator<'a> {
    parser: Parser<'a>,
    pub has_closing_quote: bool,
}

impl<'a> Iterator for StringComponentIterator<'a> {
    type Item = StringComponent;
    fn next(&mut self) -> Option<StringComponent> {
        if self.parser.pos == 0 {
            assert!(
                self.parser.advance() == '"',
                "string literal should start with double quotes"
            );
        }

        if let Some(component) = self.parser.parse_string_component() {
            return Some(component);
        }

        // We get here when there are no char components left to parse
        if self.parser.peek() == Some('"') {
            self.parser.advance();
            self.has_closing_quote = true;
        }

        assert!(
            self.parser.peek() == None,
            "string literal should leave no unparsed input: src = {}, pos = {}, length = {}",
            self.parser.src,
            self.parser.pos,
            self.parser.src.len()
        );

        None
    }
}
crates/ra_syntax/src/validation/byte.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
//! Validation of byte literals

use crate::{
    ast::{self, AstNode},
    string_lexing::{self, CharComponentKind},
    TextRange,
    validation::char,
    yellow::{
        SyntaxError,
        SyntaxErrorKind::*,
    },
};

pub(super) fn validate_byte_node(node: ast::Byte, errors: &mut Vec<SyntaxError>) {
    let literal_text = node.text();
    let literal_range = node.syntax().range();
    let mut components = string_lexing::parse_byte_literal(literal_text);
    let mut len = 0;
    for component in &mut components {
        len += 1;
        let text = &literal_text[component.range];
        let range = component.range + literal_range.start();
        validate_byte_component(text, component.kind, range, errors);
    }

    if !components.has_closing_quote {
        errors.push(SyntaxError::new(UnclosedByte, literal_range));
    }

    if len == 0 {
        errors.push(SyntaxError::new(EmptyByte, literal_range));
    }

    if len > 1 {
        errors.push(SyntaxError::new(OverlongByte, literal_range));
    }
}

pub(super) fn validate_byte_component(
    text: &str,
    kind: CharComponentKind,
    range: TextRange,
    errors: &mut Vec<SyntaxError>,
) {
    use self::CharComponentKind::*;
    match kind {
        AsciiEscape => validate_byte_escape(text, range, errors),
        AsciiCodeEscape => validate_byte_code_escape(text, range, errors),
        UnicodeEscape => errors.push(SyntaxError::new(UnicodeEscapeForbidden, range)),
        CodePoint => {
            let c = text
                .chars()
                .next()
                .expect("Code points should be one character long");

            // These bytes must always be escaped
            if c == '\t' || c == '\r' || c == '\n' {
                errors.push(SyntaxError::new(UnescapedByte, range));
            }

            // Only ASCII bytes are allowed
            if c > 0x7F as char {
                errors.push(SyntaxError::new(ByteOutOfRange, range));
            }
        }
    }
}

fn validate_byte_escape(text: &str, range: TextRange, errors: &mut Vec<SyntaxError>) {
    if text.len() == 1 {
        // Escape sequence consists only of leading `\`
        errors.push(SyntaxError::new(EmptyByteEscape, range));
    } else {
        let escape_code = text.chars().skip(1).next().unwrap();
        if !char::is_ascii_escape(escape_code) {
            errors.push(SyntaxError::new(InvalidByteEscape, range));
        }
    }
}

fn validate_byte_code_escape(text: &str, range: TextRange, errors: &mut Vec<SyntaxError>) {
    // A ByteCodeEscape has 4 chars, example: `\xDD`
    if text.len() < 4 {
        errors.push(SyntaxError::new(TooShortByteCodeEscape, range));
    } else {
        assert!(
            text.chars().count() == 4,
            "ByteCodeEscape cannot be longer than 4 chars"
        );

        if u8::from_str_radix(&text[2..], 16).is_err() {
            errors.push(SyntaxError::new(MalformedByteCodeEscape, range));
        }
    }
}

#[cfg(test)]
mod test {
    use crate::SourceFileNode;

    fn build_file(literal: &str) -> SourceFileNode {
        let src = format!("const C: u8 = b'{}';", literal);
        SourceFileNode::parse(&src)
    }

    fn assert_valid_byte(literal: &str) {
        let file = build_file(literal);
        assert!(
            file.errors().len() == 0,
            "Errors for literal '{}': {:?}",
            literal,
            file.errors()
        );
    }

    fn assert_invalid_byte(literal: &str) {
        let file = build_file(literal);
        assert!(file.errors().len() > 0);
    }

    #[test]
    fn test_ansi_codepoints() {
        for byte in 0..128 {
            match byte {
                b'\n' | b'\r' | b'\t' => assert_invalid_byte(&(byte as char).to_string()),
                b'\'' | b'\\' => { /* Ignore character close and backslash */ }
                _ => assert_valid_byte(&(byte as char).to_string()),
            }
        }

        for byte in 128..=255u8 {
            assert_invalid_byte(&(byte as char).to_string());
        }
    }

    #[test]
    fn test_unicode_codepoints() {
        let invalid = ["Ƒ", "バ", "メ", "﷽"];
        for c in &invalid {
            assert_invalid_byte(c);
        }
    }

    #[test]
    fn test_unicode_multiple_codepoints() {
        let invalid = ["नी", "👨👨"];
        for c in &invalid {
            assert_invalid_byte(c);
        }
    }

    #[test]
    fn test_valid_byte_escape() {
        let valid = [r"\'", "\"", "\\\\", "\\\"", r"\n", r"\r", r"\t", r"\0"];
        for c in &valid {
            assert_valid_byte(c);
        }
    }

    #[test]
    fn test_invalid_byte_escape() {
        let invalid = [r"\a", r"\?", r"\"];
        for c in &invalid {
            assert_invalid_byte(c);
        }
    }

    #[test]
    fn test_valid_byte_code_escape() {
        let valid = [r"\x00", r"\x7F", r"\x55", r"\xF0"];
        for c in &valid {
            assert_valid_byte(c);
        }
    }

    #[test]
    fn test_invalid_byte_code_escape() {
        let invalid = [r"\x", r"\x7"];
        for c in &invalid {
            assert_invalid_byte(c);
        }
    }

    #[test]
    fn test_invalid_unicode_escape() {
        let well_formed = [
            r"\u{FF}",
            r"\u{0}",
            r"\u{F}",
            r"\u{10FFFF}",
            r"\u{1_0__FF___FF_____}",
        ];
        for c in &well_formed {
            assert_invalid_byte(c);
        }

        let invalid = [
            r"\u",
            r"\u{}",
            r"\u{",
            r"\u{FF",
            r"\u{FFFFFF}",
            r"\u{_F}",
            r"\u{00FFFFF}",
            r"\u{110000}",
        ];
        for c in &invalid {
            assert_invalid_byte(c);
        }
    }
}
crates/ra_syntax/src/validation/byte_string.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
use crate::{
    ast::{self, AstNode},
    string_lexing::{self, StringComponentKind},
    yellow::{
        SyntaxError,
        SyntaxErrorKind::*,
    },
};

use super::byte;

pub(crate) fn validate_byte_string_node(node: ast::ByteString, errors: &mut Vec<SyntaxError>) {
    let literal_text = node.text();
    let literal_range = node.syntax().range();
    let mut components = string_lexing::parse_byte_string_literal(literal_text);
    for component in &mut components {
        let range = component.range + literal_range.start();

        match component.kind {
            StringComponentKind::Char(kind) => {
                // Chars must escape \t, \n and \r codepoints, but strings don't
                let text = &literal_text[component.range];
                match text {
                    "\t" | "\n" | "\r" => { /* always valid */ }
                    _ => byte::validate_byte_component(text, kind, range, errors),
                }
            }
            StringComponentKind::IgnoreNewline => { /* always valid */ }
        }
    }

    if !components.has_closing_quote {
        errors.push(SyntaxError::new(UnclosedString, literal_range));
    }
}

#[cfg(test)]
mod test {
    use crate::SourceFileNode;

    fn build_file(literal: &str) -> SourceFileNode {
        let src = format!(r#"const S: &'static [u8] = b"{}";"#, literal);
        println!("Source: {}", src);
        SourceFileNode::parse(&src)
    }

    fn assert_valid_str(literal: &str) {
        let file = build_file(literal);
        assert!(
            file.errors().len() == 0,
            "Errors for literal '{}': {:?}",
            literal,
            file.errors()
        );
    }

    fn assert_invalid_str(literal: &str) {
        let file = build_file(literal);
        assert!(file.errors().len() > 0);
    }

    #[test]
    fn test_ansi_codepoints() {
        for byte in 0..128 {
            match byte {
                b'\"' | b'\\' => { /* Ignore string close and backslash */ }
                _ => assert_valid_str(&(byte as char).to_string()),
            }
        }

        for byte in 128..=255u8 {
            assert_invalid_str(&(byte as char).to_string());
        }
    }

    #[test]
    fn test_unicode_codepoints() {
        let invalid = ["Ƒ", "バ", "メ", "﷽"];
        for c in &invalid {
            assert_invalid_str(c);
        }
    }

    #[test]
    fn test_unicode_multiple_codepoints() {
        let invalid = ["नी", "👨👨"];
        for c in &invalid {
            assert_invalid_str(c);
        }
    }

    #[test]
    fn test_valid_ascii_escape() {
        let valid = [r"\'", r#"\""#, r"\\", r"\n", r"\r", r"\t", r"\0", "a", "b"];
        for c in &valid {
            assert_valid_str(c);
        }
    }

    #[test]
    fn test_invalid_ascii_escape() {
        let invalid = [r"\a", r"\?", r"\"];
        for c in &invalid {
            assert_invalid_str(c);
        }
    }

    #[test]
    fn test_valid_ascii_code_escape() {
        let valid = [r"\x00", r"\x7F", r"\x55", r"\xF0"];
        for c in &valid {
            assert_valid_str(c);
        }
    }

    #[test]
    fn test_invalid_ascii_code_escape() {
        let invalid = [r"\x", r"\x7"];
        for c in &invalid {
            assert_invalid_str(c);
        }
    }

    #[test]
    fn test_invalid_unicode_escape() {
        let well_formed = [
            r"\u{FF}",
            r"\u{0}",
            r"\u{F}",
            r"\u{10FFFF}",
            r"\u{1_0__FF___FF_____}",
        ];
        for c in &well_formed {
            assert_invalid_str(c);
        }

        let invalid = [
            r"\u",
            r"\u{}",
            r"\u{",
            r"\u{FF",
            r"\u{FFFFFF}",
            r"\u{_F}",
            r"\u{00FFFFF}",
            r"\u{110000}",
        ];
        for c in &invalid {
            assert_invalid_str(c);
        }
    }

    #[test]
    fn test_mixed_invalid() {
        assert_invalid_str(
            r"This is the tale of a string
            with a newline in between, some emoji (👨👨) here and there,
            unicode escapes like this: \u{1FFBB} and weird stuff like
            this ﷽",
        );
    }

    #[test]
    fn test_mixed_valid() {
        assert_valid_str(
            r"This is the tale of a string
            with a newline in between, no emoji at all,
            nor unicode escapes or weird stuff",
        );
    }

    #[test]
    fn test_ignore_newline() {
        assert_valid_str(
            "Hello \
            World",
        );
    }
}
@@ -1,3 +1,5 @@
//! Validation of char literals

use std::u32;

use arrayvec::ArrayString;
@@ -12,7 +14,7 @@
    },
};

pub(crate) fn validate_char_node(node: ast::Char, errors: &mut Vec<SyntaxError>) {
pub(super) fn validate_char_node(node: ast::Char, errors: &mut Vec<SyntaxError>) {
    let literal_text = node.text();
    let literal_range = node.syntax().range();
    let mut components = string_lexing::parse_char_literal(literal_text);
@@ -37,7 +39,7 @@ pub(crate) fn validate_char_node(node: ast::Char, errors: &mut Vec<SyntaxError>)
    }
}

pub(crate) fn validate_char_component(
pub(super) fn validate_char_component(
    text: &str,
    kind: CharComponentKind,
    range: TextRange,
@@ -46,7 +48,19 @@ pub(crate) fn validate_char_component(
    // Validate escapes
    use self::CharComponentKind::*;
    match kind {
        AsciiEscape => {
        AsciiEscape => validate_ascii_escape(text, range, errors),
        AsciiCodeEscape => validate_ascii_code_escape(text, range, errors),
        UnicodeEscape => validate_unicode_escape(text, range, errors),
        CodePoint => {
            // These code points must always be escaped
            if text == "\t" || text == "\r" || text == "\n" {
                errors.push(SyntaxError::new(UnescapedCodepoint, range));
            }
        }
    }
}

fn validate_ascii_escape(text: &str, range: TextRange, errors: &mut Vec<SyntaxError>) {
    if text.len() == 1 {
        // Escape sequence consists only of leading `\`
        errors.push(SyntaxError::new(EmptyAsciiEscape, range));
@@ -57,7 +71,15 @@ pub(crate) fn validate_char_component(
            }
        }
    }
        AsciiCodeEscape => {

pub(super) fn is_ascii_escape(code: char) -> bool {
    match code {
        '\\' | '\'' | '"' | 'n' | 'r' | 't' | '0' => true,
        _ => false,
    }
}

fn validate_ascii_code_escape(text: &str, range: TextRange, errors: &mut Vec<SyntaxError>) {
    // An AsciiCodeEscape has 4 chars, example: `\xDD`
    if text.len() < 4 {
        errors.push(SyntaxError::new(TooShortAsciiCodeEscape, range));
@@ -74,7 +96,8 @@ pub(crate) fn validate_char_component(
            }
        }
    }
        UnicodeEscape => {

fn validate_unicode_escape(text: &str, range: TextRange, errors: &mut Vec<SyntaxError>) {
    assert!(&text[..2] == "\\u", "UnicodeEscape always starts with \\u");

    if text.len() == 2 {
@@ -136,21 +159,6 @@ pub(crate) fn validate_char_component(
            }
        }
    }
        CodePoint => {
            // These code points must always be escaped
            if text == "\t" || text == "\r" {
                errors.push(SyntaxError::new(UnescapedCodepoint, range));
            }
        }
    }
}

fn is_ascii_escape(code: char) -> bool {
    match code {
        '\\' | '\'' | '"' | 'n' | 'r' | 't' | '0' => true,
        _ => false,
    }
}

#[cfg(test)]
mod test {
@@ -205,9 +213,7 @@ fn test_unicode_multiple_codepoints() {

    #[test]
    fn test_valid_ascii_escape() {
        let valid = [
            r"\'", "\"", "\\\\", "\\\"", r"\n", r"\r", r"\t", r"\0", "a", "b",
        ];
        let valid = [r"\'", "\"", "\\\\", "\\\"", r"\n", r"\r", r"\t", r"\0"];
        for c in &valid {
            assert_valid_char(c);
        }
@@ -5,6 +5,8 @@
    yellow::SyntaxError,
};

mod byte;
mod byte_string;
mod char;
mod string;

@@ -12,6 +14,8 @@ pub(crate) fn validate(file: &SourceFileNode) -> Vec<SyntaxError> {
    let mut errors = Vec::new();
    for node in file.syntax().descendants() {
        let _ = visitor_ctx(&mut errors)
            .visit::<ast::Byte, _>(self::byte::validate_byte_node)
            .visit::<ast::ByteString, _>(self::byte_string::validate_byte_string_node)
            .visit::<ast::Char, _>(self::char::validate_char_node)
            .visit::<ast::String, _>(self::string::validate_string_node)
            .accept(node);
@@ -72,6 +72,16 @@ pub enum SyntaxErrorKind {
    EmptyChar,
    UnclosedChar,
    OverlongChar,
    EmptyByte,
    UnclosedByte,
    OverlongByte,
    ByteOutOfRange,
    UnescapedByte,
    EmptyByteEscape,
    InvalidByteEscape,
    TooShortByteCodeEscape,
    MalformedByteCodeEscape,
    UnicodeEscapeForbidden,
    EmptyAsciiEscape,
    InvalidAsciiEscape,
    TooShortAsciiCodeEscape,
@@ -98,6 +108,19 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            EmptyChar => write!(f, "Empty char literal"),
            UnclosedChar => write!(f, "Unclosed char literal"),
            OverlongChar => write!(f, "Char literal should be one character long"),
            EmptyByte => write!(f, "Empty byte literal"),
            UnclosedByte => write!(f, "Unclosed byte literal"),
            OverlongByte => write!(f, "Byte literal should be one character long"),
            ByteOutOfRange => write!(f, "Byte should be a valid ASCII character"),
            UnescapedByte => write!(f, "This byte should always be escaped"),
            EmptyByteEscape => write!(f, "Empty escape sequence"),
            InvalidByteEscape => write!(f, "Invalid escape sequence"),
            TooShortByteCodeEscape => write!(f, "Escape sequence should have two digits"),
            MalformedByteCodeEscape => write!(f, "Escape sequence should be a hexadecimal number"),
            UnicodeEscapeForbidden => write!(
                f,
                "Unicode escapes are not allowed in byte literals or byte strings"
            ),
            TooShortAsciiCodeEscape => write!(f, "Escape sequence should have two digits"),
            AsciiCodeEscapeOutOfRange => {
                write!(f, "Escape sequence should be between \\x00 and \\x7F")