Switch to TryFrom

Aleksey Kladov 2020-04-25 00:57:47 +02:00
parent dc2151085e
commit 63a462f37c
13 changed files with 63 additions and 53 deletions

View File

@@ -60,7 +60,6 @@ pub(crate) fn add_custom_impl(ctx: AssistCtx) -> Option<Assist> {
         .collect::<Vec<SmolStr>>();
     let has_more_derives = !new_attr_input.is_empty();
     let new_attr_input = new_attr_input.iter().sep_by(", ").surround_with("(", ")").to_string();
-    let new_attr_input_len = new_attr_input.len();

     let mut buf = String::new();
     buf.push_str("\n\nimpl ");
@@ -70,8 +69,9 @@ pub(crate) fn add_custom_impl(ctx: AssistCtx) -> Option<Assist> {
     buf.push_str(" {\n");

     let cursor_delta = if has_more_derives {
+        let delta = input.syntax().text_range().len() - TextSize::of(&new_attr_input);
         edit.replace(input.syntax().text_range(), new_attr_input);
-        input.syntax().text_range().len() - TextSize::from_usize(new_attr_input_len)
+        delta
     } else {
         let attr_range = attr.syntax().text_range();
         edit.delete(attr_range);
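The removed `new_attr_input_len` cache existed only because `new_attr_input` is moved into `edit.replace` before its length is needed; measuring the `TextSize` delta up front makes the cache unnecessary. A minimal standalone sketch of that move-before-measure pattern, assuming the text-size crate (which these `TextSize`/`TextRange` types come from) and a hypothetical `consume` standing in for `edit.replace`:

use text_size::TextSize;

fn consume(_s: String) {} // stands in for edit.replace(...), which takes the String by value

fn main() {
    let new_attr_input = String::from("(Clone, Debug)");
    // Measure before the move; TextSize::of replaces the cached usize length.
    let delta = TextSize::of(new_attr_input.as_str());
    consume(new_attr_input);
    assert_eq!(delta, TextSize::of("(Clone, Debug)"));
}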

View File

@@ -129,7 +129,7 @@ fn render(self) -> Option<FunctionTemplate> {
             let fn_def = indent_once.increase_indent(fn_def);
             let fn_def = ast::make::add_trailing_newlines(1, fn_def);
             let fn_def = indent.increase_indent(fn_def);
-            (fn_def, it.syntax().text_range().start() + TextSize::from_usize(1))
+            (fn_def, it.syntax().text_range().start() + TextSize::of('{'))
        }
    };

View File

@@ -77,13 +77,13 @@ pub(crate) fn add_new(ctx: AssistCtx) -> Option<Assist> {
                 .text_range()
                 .end();
-            Some((start, TextSize::from_usize(1)))
+            Some((start, TextSize::of("\n")))
         })
         .unwrap_or_else(|| {
             buf = generate_impl_text(&strukt, &buf);
             let start = strukt.syntax().text_range().end();
-            (start, TextSize::from_usize(3))
+            (start, TextSize::of("\n}\n"))
         });

     edit.set_cursor(start_offset + TextSize::of(&buf) - end_offset);
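This assist and the one above it trade `TextSize::from_usize(n)` magic numbers for the measured width of the literal text being skipped, so the offset arithmetic stays in sync if the literal ever changes. A small illustration, assuming the text-size crate; this is a sketch, not part of the commit:

use text_size::TextSize;

fn main() {
    // The width of the text documents itself: 1 for "\n", 3 for "\n}\n".
    assert_eq!(TextSize::of("\n"), TextSize::from(1u32));
    assert_eq!(TextSize::of("\n}\n"), TextSize::from(3u32));
    assert_eq!(TextSize::of('{'), TextSize::from(1u32));
}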

View File

@@ -89,7 +89,7 @@ enum CursorPos {
     edit.target(current_text_range);
     edit.set_cursor(match cursor_pos {
-        CursorPos::InExpr(back_offset) => start + TextSize::from_usize(arm.len()) - back_offset,
+        CursorPos::InExpr(back_offset) => start + TextSize::of(&arm) - back_offset,
         CursorPos::InPat(offset) => offset,
     });
     edit.replace(TextRange::new(start, end), arm);

View File

@@ -1,8 +1,9 @@
 //! `LineIndex` maps flat `TextSize` offsets into `(Line, Column)`
 //! representation.
+use std::iter;
+
 use ra_syntax::{TextRange, TextSize};
 use rustc_hash::FxHashMap;
-use std::iter;
 use superslice::Ext;

 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -116,12 +117,11 @@ fn utf8_to_utf16_col(&self, line: u32, col: TextSize) -> usize {
         res
     }

-    fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextSize {
-        let mut col: TextSize = col.into();
+    fn utf16_to_utf8_col(&self, line: u32, mut col: u32) -> TextSize {
         if let Some(utf16_chars) = self.utf16_lines.get(&line) {
             for c in utf16_chars {
-                if col >= c.start {
-                    col += c.len() - TextSize::from_usize(1);
+                if col >= u32::from(c.start) {
+                    col += u32::from(c.len()) - 1;
                 } else {
                     // From here on, all utf16 characters come *after* the character we are mapping,
                     // so we don't need to take them into account
@@ -130,12 +130,12 @@ fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextSize {
             }
         }
-        col
+        col.into()
     }
 }

 #[cfg(test)]
-mod test_line_index {
+mod tests {
     use super::*;

     #[test]
@@ -224,12 +224,12 @@ fn test_string() {
         assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15);

         // UTF-16 to UTF-8
-        assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from_usize(15));
+        assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15));
-        assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from_usize(20));
-        assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from_usize(23));
+        assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from(20));
+        assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(23));
-        assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from_usize(15));
+        assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from(15));
     }

     #[test]
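The rewritten `utf16_to_utf8_col` now does all of its column arithmetic in plain `u32` and converts to `TextSize` once on the way out. That works because `u32` ↔ `TextSize` conversions are infallible in both directions, while `usize` → `TextSize` is not, which is the point of the whole commit. A standalone sketch of that conversion rule, assuming the text-size crate:

use std::convert::TryFrom;
use text_size::TextSize;

fn main() {
    // u32 <-> TextSize converts infallibly in both directions...
    let size: TextSize = 15u32.into();
    assert_eq!(u32::from(size), 15);

    // ...but usize -> TextSize is fallible (usize can exceed u32 on 64-bit
    // targets), which this commit makes explicit via TryFrom.
    let idx: usize = 15;
    assert_eq!(TextSize::try_from(idx).unwrap(), size);
}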

View File

@@ -7,6 +7,8 @@
 //! Code in this module applies this "to (Line, Column) after edit"
 //! transformation.
+use std::convert::TryInto;
+
 use ra_syntax::{TextRange, TextSize};
 use ra_text_edit::{AtomTextEdit, TextEdit};
@@ -139,14 +141,15 @@ fn next(&mut self) -> Option<Step> {
             .text
             .char_indices()
             .filter_map(|(i, c)| {
+                let i: TextSize = i.try_into().unwrap();
+                let char_len = TextSize::of(c);
                 if c == '\n' {
-                    let next_offset = self.offset + TextSize::from_usize(i + 1);
+                    let next_offset = self.offset + i + char_len;
                     let next = Step::Newline(next_offset);
                     Some((next, next_offset))
                 } else {
-                    let char_len = TextSize::of(c);
-                    if char_len > TextSize::from_usize(1) {
-                        let start = self.offset + TextSize::from_usize(i);
+                    if !c.is_ascii() {
+                        let start = self.offset + i;
                         let end = start + char_len;
                         let next = Step::Utf16Char(TextRange::new(start, end));
                         let next_offset = end;
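Hoisting `i.try_into().unwrap()` and `TextSize::of(c)` to the top of the closure lets both branches share them, and `!c.is_ascii()` is equivalent to the old `char_len > 1` test, since a char occupies exactly one UTF-8 byte iff it is ASCII. A hedged sketch of the same iteration pattern over a small string, assuming the text-size crate:

use std::convert::TryInto;
use text_size::{TextRange, TextSize};

fn main() {
    let text = "a\nλ"; // 'λ' is 2 bytes in UTF-8 but 1 unit in UTF-16
    for (i, c) in text.char_indices() {
        let i: TextSize = i.try_into().unwrap(); // fallible usize -> TextSize
        let char_len = TextSize::of(c); // UTF-8 width of the char
        if !c.is_ascii() {
            // Only non-ASCII chars can make UTF-8 and UTF-16 columns diverge.
            println!("wide char {:?} at {:?}", c, TextRange::at(i, char_len));
        }
    }
}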

View File

@@ -4,7 +4,7 @@
 //! get a super-set of matches. Then, we confirm each match using precise
 //! name resolution.

-use std::mem;
+use std::{convert::TryInto, mem};

 use hir::{DefWithBody, HasSource, Module, ModuleSource, Semantics, Visibility};
 use once_cell::unsync::Lazy;
@@ -207,7 +207,7 @@ pub fn find_usages(
         let tree = Lazy::new(|| sema.parse(file_id).syntax().clone());

         for (idx, _) in text.match_indices(pat) {
-            let offset = TextSize::from_usize(idx);
+            let offset: TextSize = idx.try_into().unwrap();
             if !search_range.contains_inclusive(offset) {
                 tested_by!(search_filters_by_range; force);
                 continue;

View File

@@ -516,7 +516,7 @@ fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>>
     fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
         if let Some((punct, offset)) = self.punct_offset.clone() {
             if usize::from(offset) + 1 < punct.text().len() {
-                let offset = offset + TextSize::from_usize(1);
+                let offset = offset + TextSize::of('.');
                 let range = punct.text_range();
                 self.punct_offset = Some((punct.clone(), offset));
                 let range = TextRange::at(range.start() + offset, TextSize::of('.'));
@@ -532,9 +532,9 @@ fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
         let token = if curr.kind().is_punct() {
             let range = curr.text_range();
-            let range = TextRange::at(range.start(), TextSize::from_usize(1));
-            self.punct_offset = Some((curr.clone(), TextSize::from_usize(0)));
-            (SynToken::Punch(curr, TextSize::from_usize(0)), range)
+            let range = TextRange::at(range.start(), TextSize::of('.'));
+            self.punct_offset = Some((curr.clone(), 0.into()));
+            (SynToken::Punch(curr, 0.into()), range)
         } else {
             self.punct_offset = None;
             let range = curr.text_range();
@@ -546,7 +546,7 @@ fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
     fn peek(&self) -> Option<Self::Token> {
         if let Some((punct, mut offset)) = self.punct_offset.clone() {
-            offset = offset + TextSize::from_usize(1);
+            offset = offset + TextSize::of('.');
             if usize::from(offset) < punct.text().len() {
                 return Some(SynToken::Punch(punct, offset));
             }
@@ -558,7 +558,7 @@ fn peek(&self) -> Option<Self::Token> {
         }

         let token = if curr.kind().is_punct() {
-            SynToken::Punch(curr, TextSize::from_usize(0))
+            SynToken::Punch(curr, 0.into())
         } else {
             SynToken::Ordiniary(curr)
         };
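Multi-character punctuation tokens like `=>` are emitted one character at a time, and every Rust punct is a single ASCII character, so `TextSize::of('.')` is a self-documenting stand-in for the bare `1`. A standalone sketch of the stepping logic, assuming the text-size crate (whose `TextRange` can index `str` directly):

use text_size::{TextRange, TextSize};

fn main() {
    let punct = "=>";
    let mut offset: TextSize = 0.into(); // same shorthand the diff uses
    while usize::from(offset) < punct.len() {
        // One ASCII punct char at a time; TextSize::of('.') is just 1.
        let range = TextRange::at(offset, TextSize::of('.'));
        println!("piece {:?} at {:?}", &punct[range], range);
        offset += TextSize::of('.');
    }
}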

View File

@@ -1,5 +1,7 @@
 //! There are many AstNodes, but only a few tokens, so we hand-write them here.

+use std::convert::{TryFrom, TryInto};
+
 use crate::{
     ast::{AstToken, Comment, RawString, String, Whitespace},
     TextRange, TextSize,
@@ -95,8 +97,8 @@ fn new(literal: &str) -> Option<QuoteOffsets> {
         }

         let start = TextSize::from(0);
-        let left_quote = TextSize::from_usize(left_quote) + TextSize::of('"');
-        let right_quote = TextSize::from_usize(right_quote);
+        let left_quote = TextSize::try_from(left_quote).unwrap() + TextSize::of('"');
+        let right_quote = TextSize::try_from(right_quote).unwrap();
         let end = TextSize::of(literal);

         let res = QuoteOffsets {
@@ -498,7 +500,7 @@ fn char_ranges(
         let mut res = Vec::with_capacity(text.len());
         rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| {
             res.push((
-                TextRange::new(TextSize::from_usize(range.start), TextSize::from_usize(range.end))
+                TextRange::new(range.start.try_into().unwrap(), range.end.try_into().unwrap())
                     + offset,
                 unescaped_char,
             ))
@@ -518,11 +520,7 @@ fn char_ranges(
         let mut res = Vec::with_capacity(text.len());
         for (idx, c) in text.char_indices() {
-            res.push((
-                TextRange::new(TextSize::from_usize(idx), TextSize::from_usize(idx + c.len_utf8()))
-                    + offset,
-                Ok(c),
-            ));
+            res.push((TextRange::at(idx.try_into().unwrap(), TextSize::of(c)) + offset, Ok(c)));
         }
         Some(res)
     }
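`TextRange::at(start, len)` pairs naturally with `TextSize::of(c)`, collapsing the old five-line `TextRange::new(start, end)` construction into one expression. A hedged equivalence check, assuming the text-size crate:

use std::convert::TryInto;
use text_size::{TextRange, TextSize};

fn main() {
    let (idx, c): (usize, char) = (4, 'é'); // 'é' is 2 UTF-8 bytes
    let start: TextSize = idx.try_into().unwrap();

    // Old shape: explicit start and end.
    let old = TextRange::new(start, start + TextSize::of(c));
    // New shape: start plus measured length.
    let new = TextRange::at(start, TextSize::of(c));
    assert_eq!(old, new);
}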

View File

@@ -1,8 +1,13 @@
 //! FIXME: write short doc here

-use crate::{validation, AstNode, SourceFile, TextRange, TextSize};
+use std::{
+    convert::TryInto,
+    str::{self, FromStr},
+};
+
 use ra_text_edit::AtomTextEdit;
-use std::str::{self, FromStr};
+
+use crate::{validation, AstNode, SourceFile, TextRange};

 fn check_file_invariants(file: &SourceFile) {
     let root = file.syntax();
@@ -35,7 +40,7 @@ pub fn from_data(data: &[u8]) -> Option<Self> {
         let text = format!("{}{}{}", PREFIX, text, SUFFIX);
         text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range
         let delete =
-            TextRange::at(TextSize::from_usize(delete_start), TextSize::from_usize(delete_len));
+            TextRange::at(delete_start.try_into().unwrap(), delete_len.try_into().unwrap());
         let edited_text =
             format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]);
         let edit = AtomTextEdit { delete, insert };

View File

@@ -1,6 +1,8 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
 //! It is just a bridge to `rustc_lexer`.

+use std::convert::TryInto;
+
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
@@ -28,18 +30,19 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let mut tokens = Vec::new();
     let mut errors = Vec::new();

-    let mut offset: usize = rustc_lexer::strip_shebang(text)
-        .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
+    let mut offset = match rustc_lexer::strip_shebang(text) {
+        Some(shebang_len) => {
+            tokens.push(Token { kind: SHEBANG, len: shebang_len.try_into().unwrap() });
             shebang_len
-        })
-        .unwrap_or(0);
+        }
+        None => 0,
+    };

     let text_without_shebang = &text[offset..];

     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextSize::from_usize(rustc_token.len);
-        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);
+        let token_len: TextSize = rustc_token.len.try_into().unwrap();
+        let token_range = TextRange::at(offset.try_into().unwrap(), token_len);
         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
@@ -96,10 +99,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);

-    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
-    let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
-    });
+    let token = Token { kind: syntax_kind, len: rustc_token.len.try_into().unwrap() };
+    let optional_error = err_message
+        .map(|err_message| SyntaxError::new(err_message, TextRange::up_to(TextSize::of(text))));

     Some((token, optional_error))
 }
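`TextRange::up_to(end)` in the new `lex_first_token` is shorthand for a range starting at zero, here an error span covering all of `text`. A small sketch of that identity, assuming the text-size crate:

use text_size::{TextRange, TextSize};

fn main() {
    let text = "let x = 1;";
    let end = TextSize::of(text);
    // up_to(end) == new(0, end): a span covering the whole text.
    assert_eq!(TextRange::up_to(end), TextRange::new(0.into(), end));
}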

View File

@@ -121,7 +121,7 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = TextSize::from_usize(0);
+    let mut offset: TextSize = 0.into();
     for token in tokens {
         let token_len = token.len;
         let token_text = &text[TextRange::at(offset, token.len)];

View File

@@ -2,6 +2,8 @@

 mod block;

+use std::convert::TryFrom;
+
 use rustc_lexer::unescape;

 use crate::{
@@ -112,7 +114,7 @@ fn unquote(text: &str, prefix_len: usize, end_delimiter: char) -> Option<&str> {
     // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205)
     let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| {
-        let off = token.text_range().start() + TextSize::from_usize(off + prefix_len);
+        let off = token.text_range().start() + TextSize::try_from(off + prefix_len).unwrap();
         acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
     };