// rust/src/types.rs


use rustc::lint::*;
use syntax::ast;
use syntax::ast::*;
use syntax::ast_util::{is_comparison_binop, binop_to_string};
use syntax::ptr::P;
use rustc::middle::ty;
use syntax::codemap::ExpnInfo;
use utils::{in_macro, snippet, span_lint, span_help_and_lint, in_external_macro};
/// Handles all the linting of funky types
#[allow(missing_copy_implementations)]
pub struct TypePass;
declare_lint!(pub BOX_VEC, Warn,
"usage of `Box<Vec<T>>`, vector elements are already on the heap");
declare_lint!(pub LINKEDLIST, Warn,
"usage of LinkedList, usually a vector is faster, or a more specialized data \
structure like a RingBuf");
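// Illustrative example of code this lint fires on:
//
//     let list: std::collections::LinkedList<u32> = std::collections::LinkedList::new();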
/// Matches a type with a provided string, and returns its type parameters if successful
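///
/// # Example
///
/// An illustrative sketch, assuming `ty` is the AST node of a written-out
/// `std::vec::Vec<u8>`:
///
/// ```ignore
/// if let Some(params) = match_ty_unwrap(ty, &["std", "vec", "Vec"]) {
///     // `params` is the slice of type parameters, here the single entry `u8`
/// }
/// ```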
pub fn match_ty_unwrap<'a>(ty: &'a Ty, segments: &[&str]) -> Option<&'a [P<Ty>]> {
    match ty.node {
        TyPath(_, Path {segments: ref seg, ..}) => {
            // So ast::Path isn't the full path, just the tokens that were provided.
            // I could muck around with the maps and find the full path; however, the
            // more efficient way is to simply reverse the iterators and zip them,
            // which compares them in reverse until one of them runs out of segments.
            if seg.iter().rev().zip(segments.iter().rev()).all(|(a, b)| a.identifier.name == b) {
                match seg[..].last() {
                    Some(&PathSegment {parameters: AngleBracketedParameters(ref a), ..}) => {
                        Some(&a.types[..])
                    }
                    _ => None
                }
            } else {
                None
            }
        },
        _ => None
    }
}

#[allow(unused_imports)]
impl LintPass for TypePass {
    fn get_lints(&self) -> LintArray {
        lint_array!(BOX_VEC, LINKEDLIST)
    }

    fn check_ty(&mut self, cx: &Context, ty: &ast::Ty) {
        {
            // In case stuff gets moved around
            use std::boxed::Box;
            use std::vec::Vec;
        }
        match_ty_unwrap(ty, &["std", "boxed", "Box"]).and_then(|t| t.first())
            .and_then(|t| match_ty_unwrap(&**t, &["std", "vec", "Vec"]))
            .map(|_| {
                span_help_and_lint(cx, BOX_VEC, ty.span,
                                   "you seem to be trying to use `Box<Vec<T>>`. Did you mean to use `Vec<T>`?",
                                   "`Vec<T>` is already on the heap, `Box<Vec<T>>` makes an extra allocation");
            });
        {
            // In case stuff gets moved around
            use collections::linked_list::LinkedList as DL1;
            use std::collections::linked_list::LinkedList as DL2;
        }
        let dlists = [vec!["std", "collections", "linked_list", "LinkedList"],
                      vec!["collections", "linked_list", "LinkedList"]];
        for path in &dlists {
            if match_ty_unwrap(ty, &path[..]).is_some() {
                span_help_and_lint(cx, LINKEDLIST, ty.span,
                                   "I see you're using a LinkedList! Perhaps you meant some other data structure?",
                                   "a RingBuf might work");
                return;
            }
        }
    }
}

#[allow(missing_copy_implementations)]
pub struct LetPass;

declare_lint!(pub LET_UNIT_VALUE, Warn,
              "creating a let binding to a value of unit type, which usually can't be used afterwards");

fn check_let_unit(cx: &Context, decl: &Decl, info: Option<&ExpnInfo>) {
    if in_macro(cx, info) { return; }
    if let DeclLocal(ref local) = decl.node {
        let bindtype = &cx.tcx.pat_ty(&*local.pat).sty;
        if *bindtype == ty::TyTuple(vec![]) {
            span_lint(cx, LET_UNIT_VALUE, decl.span, &format!(
                "this let-binding has unit value. Consider omitting `let {} =`",
                snippet(cx, local.pat.span, "..")));
        }
    }
}

impl LintPass for LetPass {
    fn get_lints(&self) -> LintArray {
        lint_array!(LET_UNIT_VALUE)
    }

    fn check_decl(&mut self, cx: &Context, decl: &Decl) {
        cx.sess().codemap().with_expn_info(
            decl.span.expn_id,
            |info| check_let_unit(cx, decl, info));
    }
}

declare_lint!(pub UNIT_CMP, Warn,
              "comparing unit values (which is always `true` or `false`, depending on the operator)");
#[allow(missing_copy_implementations)]
pub struct UnitCmp;

impl LintPass for UnitCmp {
    fn get_lints(&self) -> LintArray {
        lint_array!(UNIT_CMP)
    }

    fn check_expr(&mut self, cx: &Context, expr: &Expr) {
        if let ExprBinary(ref cmp, ref left, _) = expr.node {
            let op = cmp.node;
            let sty = &cx.tcx.expr_ty(left).sty;
            if *sty == ty::TyTuple(vec![]) && is_comparison_binop(op) {
                let result = match op {
                    BiEq | BiLe | BiGe => "true",
                    _ => "false"
                };
                span_lint(cx, UNIT_CMP, expr.span, &format!(
                    "{}-comparison of unit values detected. This will always be {}",
                    binop_to_string(op), result));
            }
        }
    }
}

pub struct CastPass;

declare_lint!(pub CAST_PRECISION_LOSS, Allow,
              "casts that cause loss of precision, e.g. `x as f32` where `x: u64`");
declare_lint!(pub CAST_SIGN_LOSS, Allow,
              "casts from signed types to unsigned types, e.g. `x as u32` where `x: i32`");
declare_lint!(pub CAST_POSSIBLE_OVERFLOW, Allow,
              "casts that may cause overflow, e.g. `x as u8` where `x: u32`, or `x as i32` where `x: f32`");

impl LintPass for CastPass {
    fn get_lints(&self) -> LintArray {
        lint_array!(CAST_PRECISION_LOSS,
                    CAST_SIGN_LOSS,
                    CAST_POSSIBLE_OVERFLOW)
    }

    fn check_expr(&mut self, cx: &Context, expr: &Expr) {
        if let ExprCast(ref ex, _) = expr.node {
            let (cast_from, cast_to) = (cx.tcx.expr_ty(&*ex), cx.tcx.expr_ty(expr));
            if cast_from.is_numeric() && cast_to.is_numeric() && !in_external_macro(cx, expr.span) {
                match (cast_from.is_integral(), cast_to.is_integral()) {
                    (true, false) => {
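                        // `i as usize` is the variant index (TyIs = 0, TyI8 = 1, ..., TyI64 = 4),
                        // so `4 << index` yields the bit width 8/16/32/64; TyIs/TyUs produce the
                        // sentinel value 4 because their width is arch dependent.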
                        let from_nbits = match &cast_from.sty {
                            &ty::TyInt(i) => 4 << (i as usize),
                            &ty::TyUint(u) => 4 << (u as usize),
                            _ => 0
                        };
                        let to_nbits: usize = match &cast_to.sty {
                            &ty::TyFloat(ast::TyF32) => 32,
                            &ty::TyFloat(ast::TyF64) => 64,
                            _ => 0
                        };
                        if from_nbits != 4 {
                            // TyIs/TyUs are handled separately (their size is arch dependent)
                            if from_nbits >= to_nbits {
                                span_lint(cx, CAST_PRECISION_LOSS, expr.span,
                                          &format!("converting from {} to {}, which causes a loss of precision",
                                                   cast_from, cast_to));
                            }
                        }
                    },
                    (false, true) => {
                        span_lint(cx, CAST_POSSIBLE_OVERFLOW, expr.span,
                                  &format!("the contents of a {} may overflow a {}", cast_from, cast_to));
                        if !cast_to.is_signed() {
                            span_lint(cx, CAST_SIGN_LOSS, expr.span,
                                      &format!("casting from {} to {} loses the sign of the value", cast_from, cast_to));
                        }
                    },
                    (true, true) => {
                        if cast_from.is_signed() && !cast_to.is_signed() {
                            span_lint(cx, CAST_SIGN_LOSS, expr.span,
                                      &format!("casting from {} to {} loses the sign of the value", cast_from, cast_to));
                        }
                        let from_nbits = match &cast_from.sty {
                            &ty::TyInt(i) => 4 << (i as usize),
                            &ty::TyUint(u) => 4 << (u as usize),
                            _ => 0
                        };
                        let to_nbits = match &cast_to.sty {
                            &ty::TyInt(i) => 4 << (i as usize),
                            &ty::TyUint(u) => 4 << (u as usize),
                            _ => 0
                        };
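                        // Overflow is possible when the target type is narrower, or when the
                        // widths match but an unsigned value is cast to the signed type of the
                        // same width (e.g. `255u8 as i8`).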
                        if to_nbits < from_nbits ||
                           (!cast_from.is_signed() && cast_to.is_signed() && to_nbits <= from_nbits) {
                            span_lint(cx, CAST_POSSIBLE_OVERFLOW, expr.span,
                                      &format!("the contents of a {} may overflow a {}", cast_from, cast_to));
                        }
                    }
                    (false, false) => {
                        if let (&ty::TyFloat(ast::TyF64),
                                &ty::TyFloat(ast::TyF32)) = (&cast_from.sty, &cast_to.sty) {
                            span_lint(cx, CAST_POSSIBLE_OVERFLOW, expr.span, "the contents of a f64 may overflow a f32");
                        }
                    }
                }
            }
        }
    }
}