Remove `Option<!>` return types.

Several compiler functions have `Option<!>` for their return type.
That's odd: `!` is the never type, which has no values, so the only
value these functions can ever return is `None`. Why is this type used?

Because it lets you write certain patterns slightly more concisely. E.g.
if you have these common patterns:
```
    let Some(a) = f() else { return };
    let Ok(b) = g() else { return };
```
you can shorten them to these:
```
    let a = f()?;
    let b = g().ok()?;
```
Huh.
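
To make the mechanics concrete, here is a minimal, self-contained sketch
of the idiom (hypothetical names; nightly-only, since `!` requires
`#![feature(never_type)]`):
```
#![feature(never_type)]

// The only value of type `Option<!>` is `None`, so within `process` the
// `?` operator acts as a bare early `return`, and the trailing `None` is
// the only possible "return value".
fn process(input: Option<u32>, sink: &mut Vec<u32>) -> Option<!> {
    let x = input?; // early-returns `None` if `input` is `None`
    sink.push(x);   // the useful work: mutating an argument
    None
}

fn main() {
    let mut sink = Vec::new();
    process(Some(1), &mut sink); // callers just ignore the return value
    process(None, &mut sink);
    assert_eq!(sink, vec![1]);
}
```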

An `Option` return type typically designates success/failure. How should
I interpret the type signature of a function that always returns (i.e.
doesn't panic), does useful work (modifying `&mut` arguments), and yet
only ever fails? This idiom subverts the type system for a cute
syntactic trick.

Furthermore, returning `Option<!>` from a function F makes things
syntactically more convenient within F, but makes things worse at F's
callsites. The callsites can themselves use `?` with F but should not,
because they will get an unconditional early return, which is almost
certainly not desirable. Instead the return value should be ignored.
(Note that some callsites of `process_operand`, `process_immediate`,
and `process_assign` actually do use `?`, though the early return doesn't
matter in these cases because nothing of significance comes after those
calls. Ugh.)
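
To see the callsite hazard concretely, here is the earlier sketch
extended with two hypothetical callers (still nightly-only):
```
fn helper(sink: &mut Vec<u32>) -> Option<!> {
    sink.push(1);
    None
}

fn caller(sink: &mut Vec<u32>) -> Option<!> {
    helper(sink)?; // unconditional early return: `helper` only ever yields `None`
    sink.push(2);  // never runs; the `?` expression has type `!`, so rustc
                   // should even flag this as unreachable
    None
}

fn caller_fixed(sink: &mut Vec<u32>) -> Option<!> {
    helper(sink); // what callers should do instead: ignore the return value
    sink.push(2);
    None
}
```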

When I first saw this pattern I had no idea how to interpret it, and it
took me several minutes of close reading to understand everything I've
written above. I even started a Zulip thread about it to make sure I
understood it properly. "Save a few characters by introducing types so
weird that compiler devs have to discuss it on Zulip" feels like a bad
trade-off to me. This commit removes all the `Option<!>` return types
and replaces the relevant `?` uses with `let`/`else` and `return` (or
something similar). The result is slightly more verbose but much easier
to understand.
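
For comparison, the hypothetical `process` from the earlier sketch,
rewritten in the style this commit adopts (plain unit return type,
`let`/`else` for the early return, stable Rust):
```
fn process(input: Option<u32>, sink: &mut Vec<u32>) {
    let Some(x) = input else { return };
    sink.push(x);
}
```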

Author: Nicholas Nethercote
Date:   2024-08-29 12:28:55 +10:00
Parent: bf662eb808
Commit: fa4f8925f1

3 changed files with 68 additions and 66 deletions

```
@@ -382,7 +382,7 @@ fn assign_constant(
         place: PlaceIndex,
         mut operand: OpTy<'tcx>,
         projection: &[PlaceElem<'tcx>],
-    ) -> Option<!> {
+    ) {
         for &(mut proj_elem) in projection {
             if let PlaceElem::Index(index) = proj_elem {
                 if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
@@ -391,10 +391,14 @@ fn assign_constant(
                 {
                     proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
                 } else {
-                    return None;
+                    return;
                 }
             }
-            operand = self.ecx.project(&operand, proj_elem).ok()?;
+            operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) {
+                operand
+            } else {
+                return;
+            }
         }

         self.map.for_each_projection_value(
@@ -426,8 +430,6 @@ fn assign_constant(
                 }
             },
         );
-
-        None
     }

     fn binary_op(
```

```
@@ -191,26 +191,26 @@ fn is_empty(&self, state: &State<ConditionSet<'a>>) -> bool {
     /// Recursion entry point to find threading opportunities.
     #[instrument(level = "trace", skip(self))]
-    fn start_from_switch(&mut self, bb: BasicBlock) -> Option<!> {
+    fn start_from_switch(&mut self, bb: BasicBlock) {
         let bbdata = &self.body[bb];
         if bbdata.is_cleanup || self.loop_headers.contains(bb) {
-            return None;
+            return;
         }
-        let (discr, targets) = bbdata.terminator().kind.as_switch()?;
-        let discr = discr.place()?;
+        let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { return };
+        let Some(discr) = discr.place() else { return };
         debug!(?discr, ?bb);

         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let discr_layout = self.ecx.layout_of(discr_ty).ok()?;
-        let discr = self.map.find(discr.as_ref())?;
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Some(discr) = self.map.find(discr.as_ref()) else { return };
         debug!(?discr);

         let cost = CostChecker::new(self.tcx, self.param_env, None, self.body);
         let mut state = State::new_reachable();

         let conds = if let Some((value, then, else_)) = targets.as_static_if() {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };
             self.arena.alloc_from_iter([
                 Condition { value, polarity: Polarity::Eq, target: then },
                 Condition { value, polarity: Polarity::Ne, target: else_ },
@@ -225,7 +225,6 @@ fn start_from_switch(&mut self, bb: BasicBlock) -> Option<!> {
         state.insert_value_idx(discr, conds, self.map);

         self.find_opportunity(bb, state, cost, 0);
-        None
     }

     /// Recursively walk statements backwards from this bb's terminator to find threading
@@ -364,18 +363,17 @@ fn process_immediate(
         lhs: PlaceIndex,
         rhs: ImmTy<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         let register_opportunity = |c: Condition| {
             debug!(?bb, ?c.target, "register");
             self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
         };

-        let conditions = state.try_get_idx(lhs, self.map)?;
-        if let Immediate::Scalar(Scalar::Int(int)) = *rhs {
+        if let Some(conditions) = state.try_get_idx(lhs, self.map)
+            && let Immediate::Scalar(Scalar::Int(int)) = *rhs
+        {
             conditions.iter_matches(int).for_each(register_opportunity);
         }
-
-        None
     }

     /// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
@@ -428,22 +426,23 @@ fn process_operand(
         lhs: PlaceIndex,
         rhs: &Operand<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         match rhs {
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
             Operand::Constant(constant) => {
-                let constant =
-                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None).ok()?;
+                let Ok(constant) =
+                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
+                else {
+                    return;
+                };
                 self.process_constant(bb, lhs, constant, state);
             }
             // Transfer the conditions on the copied rhs.
             Operand::Move(rhs) | Operand::Copy(rhs) => {
-                let rhs = self.map.find(rhs.as_ref())?;
+                let Some(rhs) = self.map.find(rhs.as_ref()) else { return };
                 state.insert_place_idx(rhs, lhs, self.map);
             }
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self))]
@@ -453,16 +452,14 @@ fn process_assign(
         lhs_place: &Place<'tcx>,
         rhs: &Rvalue<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
-        let lhs = self.map.find(lhs_place.as_ref())?;
+    ) {
+        let Some(lhs) = self.map.find(lhs_place.as_ref()) else { return };
         match rhs {
-            Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
+            Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state),
             // Transfer the conditions on the copy rhs.
-            Rvalue::CopyForDeref(rhs) => {
-                self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
-            }
+            Rvalue::CopyForDeref(rhs) => self.process_operand(bb, lhs, &Operand::Copy(*rhs), state),
             Rvalue::Discriminant(rhs) => {
-                let rhs = self.map.find_discr(rhs.as_ref())?;
+                let Some(rhs) = self.map.find_discr(rhs.as_ref()) else { return };
                 state.insert_place_idx(rhs, lhs, self.map);
             }
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
@@ -470,7 +467,7 @@ fn process_assign(
                 let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
                 let lhs = match kind {
                     // Do not support unions.
-                    AggregateKind::Adt(.., Some(_)) => return None,
+                    AggregateKind::Adt(.., Some(_)) => return,
                     AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
                         if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
                             && let Ok(discr_value) =
@@ -478,7 +475,11 @@ fn process_assign(
                         {
                             self.process_immediate(bb, discr_target, discr_value, state);
                         }
-                        self.map.apply(lhs, TrackElem::Variant(*variant_index))?
+                        if let Some(idx) = self.map.apply(lhs, TrackElem::Variant(*variant_index)) {
+                            idx
+                        } else {
+                            return;
+                        }
                     }
                     _ => lhs,
                 };
@@ -490,8 +491,8 @@ fn process_assign(
             }
             // Transfer the conditions on the copy rhs, after inversing polarity.
             Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
-                let conditions = state.try_get_idx(lhs, self.map)?;
-                let place = self.map.find(place.as_ref())?;
+                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(place) = self.map.find(place.as_ref()) else { return };
                 let conds = conditions.map(self.arena, Condition::inv);
                 state.insert_value_idx(place, conds, self.map);
             }
@@ -502,21 +503,25 @@ fn process_assign(
                 box (Operand::Move(place) | Operand::Copy(place), Operand::Constant(value))
                 | box (Operand::Constant(value), Operand::Move(place) | Operand::Copy(place)),
             ) => {
-                let conditions = state.try_get_idx(lhs, self.map)?;
-                let place = self.map.find(place.as_ref())?;
+                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(place) = self.map.find(place.as_ref()) else { return };
                 let equals = match op {
                     BinOp::Eq => ScalarInt::TRUE,
                     BinOp::Ne => ScalarInt::FALSE,
-                    _ => return None,
+                    _ => return,
                 };
                 if value.const_.ty().is_floating_point() {
                     // Floating point equality does not follow bit-patterns.
                     // -0.0 and NaN both have special rules for equality,
                     // and therefore we cannot use integer comparisons for them.
                     // Avoid handling them, though this could be extended in the future.
-                    return None;
+                    return;
                 }
-                let value = value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
+                let Some(value) =
+                    value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()
+                else {
+                    return;
+                };
                 let conds = conditions.map(self.arena, |c| Condition {
                     value,
                     polarity: if c.matches(equals) { Polarity::Eq } else { Polarity::Ne },
@@ -527,8 +532,6 @@ fn process_assign(
             _ => {}
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self))]
@@ -537,7 +540,7 @@ fn process_statement(
         bb: BasicBlock,
         stmt: &Statement<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         let register_opportunity = |c: Condition| {
             debug!(?bb, ?c.target, "register");
             self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
@@ -550,12 +553,12 @@ fn process_statement(
             // If we expect `discriminant(place) ?= A`,
             // we have an opportunity if `variant_index ?= A`.
             StatementKind::SetDiscriminant { box place, variant_index } => {
-                let discr_target = self.map.find_discr(place.as_ref())?;
+                let Some(discr_target) = self.map.find_discr(place.as_ref()) else { return };
                 let enum_ty = place.ty(self.body, self.tcx).ty;
                 // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
                 // of a niche encoding. If we cannot ensure that we write to the discriminant, do
                 // nothing.
-                let enum_layout = self.ecx.layout_of(enum_ty).ok()?;
+                let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
                 let writes_discriminant = match enum_layout.variants {
                     Variants::Single { index } => {
                         assert_eq!(index, *variant_index);
@@ -568,24 +571,25 @@ fn process_statement(
                     } => *variant_index != untagged_variant,
                 };
                 if writes_discriminant {
-                    let discr = self.ecx.discriminant_for_variant(enum_ty, *variant_index).ok()?;
-                    self.process_immediate(bb, discr_target, discr, state)?;
+                    let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
+                    else {
+                        return;
+                    };
+                    self.process_immediate(bb, discr_target, discr, state);
                 }
             }
             // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
             StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
                 Operand::Copy(place) | Operand::Move(place),
             )) => {
-                let conditions = state.try_get(place.as_ref(), self.map)?;
+                let Some(conditions) = state.try_get(place.as_ref(), self.map) else { return };
                 conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
             }
             StatementKind::Assign(box (lhs_place, rhs)) => {
-                self.process_assign(bb, lhs_place, rhs, state)?;
+                self.process_assign(bb, lhs_place, rhs, state);
             }
             _ => {}
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self, state, cost))]
@@ -638,17 +642,17 @@ fn process_switch_int(
         targets: &SwitchTargets,
         target_bb: BasicBlock,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         debug_assert_ne!(target_bb, START_BLOCK);
         debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);

-        let discr = discr.place()?;
+        let Some(discr) = discr.place() else { return };
         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let discr_layout = self.ecx.layout_of(discr_ty).ok()?;
-        let conditions = state.try_get(discr.as_ref(), self.map)?;
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Some(conditions) = state.try_get(discr.as_ref(), self.map) else { return };

         if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };
             debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);

             // We are inside `target_bb`. Since we have a single predecessor, we know we passed
@@ -662,7 +666,7 @@ fn process_switch_int(
         } else if let Some((value, _, else_bb)) = targets.as_static_if()
             && target_bb == else_bb
         {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };

             // We only know that `discr != value`. That's much weaker information than
             // the equality we had in the previous arm. All we can conclude is that
@@ -675,8 +679,6 @@ fn process_switch_int(
                 }
             }
         }
-
-        None
     }
 }
```

```
@@ -469,12 +469,12 @@ fn check_assertion(
         msg: &AssertKind<Operand<'tcx>>,
         cond: &Operand<'tcx>,
         location: Location,
-    ) -> Option<!> {
-        let value = &self.eval_operand(cond)?;
+    ) {
+        let Some(value) = &self.eval_operand(cond) else { return };
         trace!("assertion on {:?} should be {:?}", value, expected);

         let expected = Scalar::from_bool(expected);
-        let value_const = self.use_ecx(|this| this.ecx.read_scalar(value))?;
+        let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) else { return };

         if expected != value_const {
             // Poison all places this operand references so that further code
@@ -516,14 +516,12 @@ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                     AssertKind::BoundsCheck { len, index }
                 }
                 // Remaining overflow errors are already covered by checks on the binary operators.
-                AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return None,
+                AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return,
                 // Need proper const propagator for these.
-                _ => return None,
+                _ => return,
             };
             self.report_assert_as_lint(location, AssertLintKind::UnconditionalPanic, msg);
         }
-
-        None
     }

     fn ensure_not_propagated(&self, local: Local) {
```