2016-04-18 13:08:27 -05:00
|
|
|
// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT
|
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
// except according to those terms.
|
|
|
|
|
|
|
|
//! Composable external iteration.
|
|
|
|
//!
|
|
|
|
//! If you've found yourself with a collection of some kind, and needed to
|
|
|
|
//! perform an operation on the elements of said collection, you'll quickly run
|
|
|
|
//! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so
|
|
|
|
//! it's worth becoming familiar with them.
|
|
|
|
//!
|
|
|
|
//! Before explaining more, let's talk about how this module is structured:
|
|
|
|
//!
|
|
|
|
//! # Organization
|
|
|
|
//!
|
|
|
|
//! This module is largely organized by type:
|
|
|
|
//!
|
|
|
|
//! * [Traits] are the core portion: these traits define what kind of iterators
|
|
|
|
//! exist and what you can do with them. The methods of these traits are worth
|
|
|
|
//! putting some extra study time into.
|
|
|
|
//! * [Functions] provide some helpful ways to create some basic iterators.
|
|
|
|
//! * [Structs] are often the return types of the various methods on this
|
|
|
|
//! module's traits. You'll usually want to look at the method that creates
|
|
|
|
//! the `struct`, rather than the `struct` itself. For more detail about why,
|
|
|
|
//! see '[Implementing Iterator](#implementing-iterator)'.
|
|
|
|
//!
|
|
|
|
//! [Traits]: #traits
|
|
|
|
//! [Functions]: #functions
|
|
|
|
//! [Structs]: #structs
|
|
|
|
//!
|
|
|
|
//! That's it! Let's dig into iterators.
|
|
|
|
//!
|
|
|
|
//! # Iterator
|
|
|
|
//!
|
|
|
|
//! The heart and soul of this module is the [`Iterator`] trait. The core of
|
|
|
|
//! [`Iterator`] looks like this:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! trait Iterator {
|
|
|
|
//! type Item;
|
|
|
|
//! fn next(&mut self) -> Option<Self::Item>;
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! An iterator has a method, [`next`], which when called, returns an
|
|
|
|
//! [`Option`]`<Item>`. [`next`] will return `Some(Item)` as long as there
|
2016-04-18 13:08:27 -05:00
|
|
|
//! are elements, and once they've all been exhausted, will return `None` to
|
|
|
|
//! indicate that iteration is finished. Individual iterators may choose to
|
2017-03-12 13:04:52 -05:00
|
|
|
//! resume iteration, and so calling [`next`] again may or may not eventually
|
2016-04-18 13:08:27 -05:00
|
|
|
//! start returning `Some(Item)` again at some point.
|
|
|
|
//!
|
|
|
|
//! [`Iterator`]'s full definition includes a number of other methods as well,
|
2017-03-12 13:04:52 -05:00
|
|
|
//! but they are default methods, built on top of [`next`], and so you get
|
2016-04-18 13:08:27 -05:00
|
|
|
//! them for free.
|
|
|
|
//!
|
|
|
|
//! Iterators are also composable, and it's common to chain them together to do
|
|
|
|
//! more complex forms of processing. See the [Adapters](#adapters) section
|
|
|
|
//! below for more details.
|
|
|
|
//!
|
|
|
|
//! [`Iterator`]: trait.Iterator.html
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`next`]: trait.Iterator.html#tymethod.next
|
2016-04-18 13:08:27 -05:00
|
|
|
//! [`Option`]: ../../std/option/enum.Option.html
|
|
|
|
//!
|
|
|
|
//! # The three forms of iteration
|
|
|
|
//!
|
|
|
|
//! There are three common methods which can create iterators from a collection:
|
|
|
|
//!
|
|
|
|
//! * `iter()`, which iterates over `&T`.
|
|
|
|
//! * `iter_mut()`, which iterates over `&mut T`.
|
|
|
|
//! * `into_iter()`, which iterates over `T`.
|
|
|
|
//!
|
|
|
|
//! Various things in the standard library may implement one or more of the
|
|
|
|
//! three, where appropriate.
|
|
|
|
//!
|
|
|
|
//! # Implementing Iterator
|
|
|
|
//!
|
|
|
|
//! Creating an iterator of your own involves two steps: creating a `struct` to
|
|
|
|
//! hold the iterator's state, and then `impl`ementing [`Iterator`] for that
|
|
|
|
//! `struct`. This is why there are so many `struct`s in this module: there is
|
|
|
|
//! one for each iterator and iterator adapter.
|
|
|
|
//!
|
|
|
|
//! Let's make an iterator named `Counter` which counts from `1` to `5`:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! // First, the struct:
|
|
|
|
//!
|
|
|
|
//! /// An iterator which counts from one to five
|
|
|
|
//! struct Counter {
|
|
|
|
//! count: usize,
|
|
|
|
//! }
|
|
|
|
//!
|
|
|
|
//! // we want our count to start at one, so let's add a new() method to help.
|
|
|
|
//! // This isn't strictly necessary, but is convenient. Note that we start
|
|
|
|
//! // `count` at zero, we'll see why in `next()`'s implementation below.
|
|
|
|
//! impl Counter {
|
|
|
|
//! fn new() -> Counter {
|
|
|
|
//! Counter { count: 0 }
|
|
|
|
//! }
|
|
|
|
//! }
|
|
|
|
//!
|
|
|
|
//! // Then, we implement `Iterator` for our `Counter`:
|
|
|
|
//!
|
|
|
|
//! impl Iterator for Counter {
|
|
|
|
//! // we will be counting with usize
|
|
|
|
//! type Item = usize;
|
|
|
|
//!
|
|
|
|
//! // next() is the only required method
|
|
|
|
//! fn next(&mut self) -> Option<usize> {
|
|
|
|
//! // increment our count. This is why we started at zero.
|
|
|
|
//! self.count += 1;
|
|
|
|
//!
|
|
|
|
//! // check to see if we've finished counting or not.
|
|
|
|
//! if self.count < 6 {
|
|
|
|
//! Some(self.count)
|
|
|
|
//! } else {
|
|
|
|
//! None
|
|
|
|
//! }
|
|
|
|
//! }
|
|
|
|
//! }
|
|
|
|
//!
|
|
|
|
//! // And now we can use it!
|
|
|
|
//!
|
|
|
|
//! let mut counter = Counter::new();
|
|
|
|
//!
|
|
|
|
//! let x = counter.next().unwrap();
|
|
|
|
//! println!("{}", x);
|
|
|
|
//!
|
|
|
|
//! let x = counter.next().unwrap();
|
|
|
|
//! println!("{}", x);
|
|
|
|
//!
|
|
|
|
//! let x = counter.next().unwrap();
|
|
|
|
//! println!("{}", x);
|
|
|
|
//!
|
|
|
|
//! let x = counter.next().unwrap();
|
|
|
|
//! println!("{}", x);
|
|
|
|
//!
|
|
|
|
//! let x = counter.next().unwrap();
|
|
|
|
//! println!("{}", x);
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! This will print `1` through `5`, each on their own line.
|
|
|
|
//!
|
|
|
|
//! Calling `next()` this way gets repetitive. Rust has a construct which can
|
|
|
|
//! call `next()` on your iterator, until it reaches `None`. Let's go over that
|
|
|
|
//! next.
|
|
|
|
//!
|
|
|
|
//! # for Loops and IntoIterator
|
|
|
|
//!
|
|
|
|
//! Rust's `for` loop syntax is actually sugar for iterators. Here's a basic
|
|
|
|
//! example of `for`:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let values = vec![1, 2, 3, 4, 5];
|
|
|
|
//!
|
|
|
|
//! for x in values {
|
|
|
|
//! println!("{}", x);
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! This will print the numbers one through five, each on their own line. But
|
|
|
|
//! you'll notice something here: we never called anything on our vector to
|
|
|
|
//! produce an iterator. What gives?
|
|
|
|
//!
|
|
|
|
//! There's a trait in the standard library for converting something into an
|
2017-03-12 13:04:52 -05:00
|
|
|
//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter`],
|
2016-04-18 13:08:27 -05:00
|
|
|
//! which converts the thing implementing [`IntoIterator`] into an iterator.
|
|
|
|
//! Let's take a look at that `for` loop again, and what the compiler converts
|
|
|
|
//! it into:
|
|
|
|
//!
|
|
|
|
//! [`IntoIterator`]: trait.IntoIterator.html
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`into_iter`]: trait.IntoIterator.html#tymethod.into_iter
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let values = vec![1, 2, 3, 4, 5];
|
|
|
|
//!
|
|
|
|
//! for x in values {
|
|
|
|
//! println!("{}", x);
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! Rust de-sugars this into:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let values = vec![1, 2, 3, 4, 5];
|
|
|
|
//! {
|
|
|
|
//! let result = match IntoIterator::into_iter(values) {
|
|
|
|
//! mut iter => loop {
|
2017-06-13 11:36:01 -05:00
|
|
|
//! let next;
|
|
|
|
//! match iter.next() {
|
|
|
|
//! Some(val) => next = val,
|
2016-04-18 13:08:27 -05:00
|
|
|
//! None => break,
|
2017-05-27 13:20:17 -05:00
|
|
|
//! };
|
2017-06-13 11:36:01 -05:00
|
|
|
//! let x = next;
|
2017-05-27 13:20:17 -05:00
|
|
|
//! let () = { println!("{}", x); };
|
2016-04-18 13:08:27 -05:00
|
|
|
//! },
|
|
|
|
//! };
|
|
|
|
//! result
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! First, we call `into_iter()` on the value. Then, we match on the iterator
|
2017-03-12 13:04:52 -05:00
|
|
|
//! that returns, calling [`next`] over and over until we see a `None`. At
|
2016-04-18 13:08:27 -05:00
|
|
|
//! that point, we `break` out of the loop, and we're done iterating.
|
|
|
|
//!
|
|
|
|
//! There's one more subtle bit here: the standard library contains an
|
|
|
|
//! interesting implementation of [`IntoIterator`]:
|
|
|
|
//!
|
2017-06-20 02:15:16 -05:00
|
|
|
//! ```ignore (only-for-syntax-highlight)
|
2016-04-18 13:08:27 -05:00
|
|
|
//! impl<I: Iterator> IntoIterator for I
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! In other words, all [`Iterator`]s implement [`IntoIterator`], by just
|
|
|
|
//! returning themselves. This means two things:
|
|
|
|
//!
|
|
|
|
//! 1. If you're writing an [`Iterator`], you can use it with a `for` loop.
|
|
|
|
//! 2. If you're creating a collection, implementing [`IntoIterator`] for it
|
|
|
|
//! will allow your collection to be used with the `for` loop.
|
|
|
|
//!
|
|
|
|
//! # Adapters
|
|
|
|
//!
|
|
|
|
//! Functions which take an [`Iterator`] and return another [`Iterator`] are
|
|
|
|
//! often called 'iterator adapters', as they're a form of the 'adapter
|
|
|
|
//! pattern'.
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! Common iterator adapters include [`map`], [`take`], and [`filter`].
|
2016-04-18 13:08:27 -05:00
|
|
|
//! For more, see their documentation.
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`map`]: trait.Iterator.html#method.map
|
|
|
|
//! [`take`]: trait.Iterator.html#method.take
|
|
|
|
//! [`filter`]: trait.Iterator.html#method.filter
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
|
|
|
//! # Laziness
|
|
|
|
//!
|
|
|
|
//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that
|
|
|
|
//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens
|
2017-03-12 13:04:52 -05:00
|
|
|
//! until you call [`next`]. This is sometimes a source of confusion when
|
|
|
|
//! creating an iterator solely for its side effects. For example, the [`map`]
|
2016-04-18 13:08:27 -05:00
|
|
|
//! method calls a closure on each element it iterates over:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! # #![allow(unused_must_use)]
|
|
|
|
//! let v = vec![1, 2, 3, 4, 5];
|
|
|
|
//! v.iter().map(|x| println!("{}", x));
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! This will not print any values, as we only created an iterator, rather than
|
|
|
|
//! using it. The compiler will warn us about this kind of behavior:
|
|
|
|
//!
|
|
|
|
//! ```text
|
|
|
|
//! warning: unused result which must be used: iterator adaptors are lazy and
|
|
|
|
//! do nothing unless consumed
|
|
|
|
//! ```
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! The idiomatic way to write a [`map`] for its side effects is to use a
|
2016-04-18 13:08:27 -05:00
|
|
|
//! `for` loop instead:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let v = vec![1, 2, 3, 4, 5];
|
|
|
|
//!
|
|
|
|
//! for x in &v {
|
|
|
|
//! println!("{}", x);
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`map`]: trait.Iterator.html#method.map
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
|
|
|
//! The two most common ways to evaluate an iterator are to use a `for` loop
|
2017-03-12 13:04:52 -05:00
|
|
|
//! like this, or using the [`collect`] method to produce a new collection.
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`collect`]: trait.Iterator.html#method.collect
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
|
|
|
//! # Infinity
|
|
|
|
//!
|
|
|
|
//! Iterators do not have to be finite. As an example, an open-ended range is
|
|
|
|
//! an infinite iterator:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let numbers = 0..;
|
|
|
|
//! ```
|
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! It is common to use the [`take`] iterator adapter to turn an infinite
|
2016-04-18 13:08:27 -05:00
|
|
|
//! iterator into a finite one:
|
|
|
|
//!
|
|
|
|
//! ```
|
|
|
|
//! let numbers = 0..;
|
|
|
|
//! let five_numbers = numbers.take(5);
|
|
|
|
//!
|
|
|
|
//! for number in five_numbers {
|
|
|
|
//! println!("{}", number);
|
|
|
|
//! }
|
|
|
|
//! ```
|
|
|
|
//!
|
|
|
|
//! This will print the numbers `0` through `4`, each on their own line.
|
2018-01-18 11:49:32 -06:00
|
|
|
//!
|
2018-01-18 09:28:10 -06:00
|
|
|
//! Bear in mind that methods on infinite iterators, even those for which a
|
2018-01-19 15:16:34 -06:00
|
|
|
//! result can be determined mathematically in finite time, may not terminate.
|
|
|
|
//! Specifically, methods such as [`min`], which in the general case require
|
|
|
|
//! traversing every element in the iterator, are likely not to return
|
|
|
|
//! successfully for any infinite iterators.
|
2018-01-18 11:49:32 -06:00
|
|
|
//!
|
|
|
|
//! ```no_run
|
2018-01-18 09:28:10 -06:00
|
|
|
//! let positives = 1..;
|
|
|
|
//! let least = positives.min().unwrap(); // Oh no! An infinite loop!
|
2018-01-19 15:16:34 -06:00
|
|
|
//! // `positives.min` will either overflow and panic (in debug mode),
|
|
|
|
//! // or cause an infinite loop (in release mode), so we won't reach
|
|
|
|
//! // this point!
|
2018-01-18 09:28:10 -06:00
|
|
|
//! println!("The least positive number is {}.", least);
|
|
|
|
//! ```
|
2016-04-18 13:08:27 -05:00
|
|
|
//!
|
2017-03-12 13:04:52 -05:00
|
|
|
//! [`take`]: trait.Iterator.html#method.take
|
2018-01-18 09:28:10 -06:00
|
|
|
//! [`min`]: trait.Iterator.html#method.min
|
2016-04-18 13:08:27 -05:00
|
|
|
|
|
|
|
#![stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
|
|
|
|
use cmp;
|
|
|
|
use fmt;
|
2016-04-21 14:35:39 -05:00
|
|
|
use iter_private::TrustedRandomAccess;
|
2017-10-23 00:47:27 -05:00
|
|
|
use ops::Try;
|
2016-04-18 13:08:27 -05:00
|
|
|
use usize;
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
pub use self::iterator::Iterator;
|
|
|
|
|
|
|
|
#[unstable(feature = "step_trait",
|
|
|
|
reason = "likely to be replaced by finer-grained traits",
|
2017-05-23 05:08:18 -05:00
|
|
|
issue = "42168")]
|
2016-04-18 13:08:27 -05:00
|
|
|
pub use self::range::Step;
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
pub use self::sources::{Repeat, repeat};
|
|
|
|
#[stable(feature = "iter_empty", since = "1.2.0")]
|
|
|
|
pub use self::sources::{Empty, empty};
|
|
|
|
#[stable(feature = "iter_once", since = "1.2.0")]
|
|
|
|
pub use self::sources::{Once, once};
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
2016-06-28 10:56:56 -05:00
|
|
|
pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend};
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
pub use self::traits::{ExactSizeIterator, Sum, Product};
|
2016-08-13 13:42:36 -05:00
|
|
|
#[unstable(feature = "fused", issue = "35602")]
|
|
|
|
pub use self::traits::FusedIterator;
|
2016-11-03 18:24:59 -05:00
|
|
|
#[unstable(feature = "trusted_len", issue = "37572")]
|
2016-10-20 07:07:06 -05:00
|
|
|
pub use self::traits::TrustedLen;
|
2016-04-18 13:08:27 -05:00
|
|
|
|
|
|
|
mod iterator;
|
|
|
|
mod range;
|
|
|
|
mod sources;
|
|
|
|
mod traits;
|
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
/// Transparent newtype used to implement foo methods in terms of try_foo.
/// Important until #43278 is fixed; might be better as `Result<T, !>` later.
///
/// Wrapping a value in `AlwaysOk` gives it a `Try` implementation whose
/// error type is `!` (see the impl below), so `?` applied to it can never
/// actually fail. This lets infallible fold-style methods reuse the
/// fallible `try_fold` machinery without paying for error handling.
struct AlwaysOk<T>(pub T);
|
|
|
|
|
|
|
|
impl<T> Try for AlwaysOk<T> {
    type Ok = T;
    // The never type as the error: an `AlwaysOk` cannot represent failure.
    type Error = !;
    // Unwrapping always succeeds, yielding the wrapped value.
    #[inline]
    fn into_result(self) -> Result<Self::Ok, Self::Error> { Ok(self.0) }
    // `v: !` is uninhabited, so this body is unreachable; the `!` value
    // coerces to any type, including `Self`.
    #[inline]
    fn from_error(v: Self::Error) -> Self { v }
    #[inline]
    fn from_ok(v: Self::Ok) -> Self { AlwaysOk(v) }
}
|
|
|
|
|
|
|
|
/// Used to make try_fold closures more like normal loops
///
/// `Continue` corresponds to a loop iteration that keeps going, and
/// `Break` to an early exit carrying a value; the `Try` impl below maps
/// them onto `Ok`/`Err` so `?` can short-circuit on `Break`.
#[derive(PartialEq)]
enum LoopState<C, B> {
    Continue(C),
    Break(B),
}
|
|
|
|
|
|
|
|
impl<C, B> Try for LoopState<C, B> {
    type Ok = C;
    type Error = B;
    // `Continue` maps to `Ok` (keep folding) and `Break` to `Err`
    // (stop early), so `?` on a `LoopState` short-circuits on `Break`.
    #[inline]
    fn into_result(self) -> Result<Self::Ok, Self::Error> {
        match self {
            LoopState::Continue(y) => Ok(y),
            LoopState::Break(x) => Err(x),
        }
    }
    #[inline]
    fn from_error(v: Self::Error) -> Self { LoopState::Break(v) }
    #[inline]
    fn from_ok(v: Self::Ok) -> Self { LoopState::Continue(v) }
}
|
|
|
|
|
|
|
|
impl<C, B> LoopState<C, B> {
|
|
|
|
#[inline]
|
|
|
|
fn break_value(self) -> Option<B> {
|
|
|
|
match self {
|
|
|
|
LoopState::Continue(..) => None,
|
|
|
|
LoopState::Break(x) => Some(x),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<R: Try> LoopState<R::Ok, R> {
|
|
|
|
#[inline]
|
|
|
|
fn from_try(r: R) -> Self {
|
|
|
|
match Try::into_result(r) {
|
|
|
|
Ok(v) => LoopState::Continue(v),
|
|
|
|
Err(v) => LoopState::Break(Try::from_error(v)),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#[inline]
|
|
|
|
fn into_try(self) -> R {
|
|
|
|
match self {
|
|
|
|
LoopState::Continue(v) => Try::from_ok(v),
|
|
|
|
LoopState::Break(v) => v,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-12 18:24:17 -05:00
|
|
|
/// A double-ended iterator with the direction inverted.
///
/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`rev`]: trait.Iterator.html#method.rev
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rev<T> {
    // The underlying iterator; `Rev` swaps its front and back ends.
    iter: T
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Rev<I> where I: DoubleEndedIterator {
    type Item = <I as Iterator>::Item;

    // Forward iteration on `Rev` walks the inner iterator from the back.
    #[inline]
    fn next(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next_back() }
    // Reversal does not change how many elements remain.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }

    // Forward folds over `Rev` delegate to the inner iterator's
    // back-to-front folds, so any specialized implementations it has
    // are reused.
    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R where
        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
    {
        self.iter.try_rfold(init, f)
    }

    fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, f)
    }

    // A forward search over `Rev` is a backward search over the inner
    // iterator.
    #[inline]
    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
        where P: FnMut(&Self::Item) -> bool
    {
        self.iter.rfind(predicate)
    }

    // An index from the back of `Rev` is an index from the front of the
    // inner iterator, so plain `position` gives the answer.
    #[inline]
    fn rposition<P>(&mut self, predicate: P) -> Option<usize> where
        P: FnMut(Self::Item) -> bool
    {
        self.iter.position(predicate)
    }
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> DoubleEndedIterator for Rev<I> where I: DoubleEndedIterator {
    // The back of `Rev` is the front of the underlying iterator.
    #[inline]
    fn next_back(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next() }

    // Mirror images of the `Iterator` methods on `Rev`: each reverse
    // operation delegates to the inner iterator's forward counterpart.
    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R where
        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
    {
        self.iter.try_fold(init, f)
    }

    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, f)
    }

    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
        where P: FnMut(&Self::Item) -> bool
    {
        self.iter.find(predicate)
    }
}
|
|
|
|
|
2016-04-18 16:44:02 -05:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Rev<I>
    where I: ExactSizeIterator + DoubleEndedIterator
{
    // Reversal preserves the exact element count and emptiness.
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
|
2016-04-18 16:44:02 -05:00
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// `Rev` yields exactly the elements of the inner iterator (in reverse),
// so its fused-after-`None` guarantee carries over directly.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Rev<I>
    where I: FusedIterator + DoubleEndedIterator {}

// Likewise, reversing cannot change the (trusted) element count.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Rev<I>
    where I: TrustedLen + DoubleEndedIterator {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that clones the elements of an underlying iterator.
///
/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`cloned`]: trait.Iterator.html#method.cloned
/// [`Iterator`]: trait.Iterator.html
#[stable(feature = "iter_cloned", since = "1.1.0")]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct Cloned<I> {
    // Inner iterator yielding `&T`; `Cloned` turns each into an owned `T`.
    it: I,
}
|
|
|
|
|
2016-09-28 05:28:42 -05:00
|
|
|
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> Iterator for Cloned<I>
    where I: Iterator<Item=&'a T>, T: Clone
{
    // Owned `T`s cloned out of the `&T`s produced by the inner iterator.
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.it.next().cloned()
    }

    // Cloning every element does not change how many there are.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.it.size_hint()
    }

    // Folds delegate to the inner iterator so its own `try_fold`/`fold`
    // implementations are used, cloning each reference just before it
    // reaches the caller's closure.
    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where
        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
    {
        self.it.try_fold(init, move |acc, elt| f(acc, elt.clone()))
    }

    fn fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.it.fold(init, move |acc, elt| f(acc, elt.clone()))
    }
}
|
|
|
|
|
2016-09-28 05:28:42 -05:00
|
|
|
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> DoubleEndedIterator for Cloned<I>
    where I: DoubleEndedIterator<Item=&'a T>, T: Clone
{
    fn next_back(&mut self) -> Option<T> {
        self.it.next_back().cloned()
    }

    // Back-to-front folds delegate to the inner iterator, cloning each
    // reference before handing it to the caller's closure.
    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R where
        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
    {
        self.it.try_rfold(init, move |acc, elt| f(acc, elt.clone()))
    }

    fn rfold<Acc, F>(self, init: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.it.rfold(init, move |acc, elt| f(acc, elt.clone()))
    }
}
|
|
|
|
|
2016-09-28 05:28:42 -05:00
|
|
|
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I>
    where I: ExactSizeIterator<Item=&'a T>, T: Clone
{
    // Cloning preserves the exact element count and emptiness.
    fn len(&self) -> usize {
        self.it.len()
    }

    fn is_empty(&self) -> bool {
        self.it.is_empty()
    }
}
|
2016-04-18 13:08:27 -05:00
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// `Cloned` yields one item per inner item, so the inner iterator's
// fused-after-`None` guarantee carries over.
#[unstable(feature = "fused", issue = "35602")]
impl<'a, I, T: 'a> FusedIterator for Cloned<I>
    where I: FusedIterator<Item=&'a T>, T: Clone
{}
|
|
|
|
|
2016-10-17 03:58:21 -05:00
|
|
|
#[doc(hidden)]
// Specializable (`default`) impl: clones each element on random access.
// Overridden below for `T: Copy`, where a plain dereference suffices.
default unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned<I>
    where I: TrustedRandomAccess<Item=&'a T>, T: Clone
{
    unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
        self.it.get_unchecked(i).clone()
    }

    // `Clone::clone` is arbitrary user code, so it may have side effects.
    #[inline]
    fn may_have_side_effect() -> bool { true }
}
|
|
|
|
|
2017-09-23 11:03:24 -05:00
|
|
|
#[doc(hidden)]
// Specialized impl for `T: Copy`: a plain dereference copies the value,
// so no user code runs on access.
unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned<I>
    where I: TrustedRandomAccess<Item=&'a T>, T: Copy
{
    unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
        *self.it.get_unchecked(i)
    }

    // A bitwise copy cannot run arbitrary code.
    #[inline]
    fn may_have_side_effect() -> bool { false }
}
|
|
|
|
|
2016-11-03 18:24:59 -05:00
|
|
|
// Cloning does not alter the element count, so the inner iterator's
// trusted length remains valid for `Cloned`.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, I, T: 'a> TrustedLen for Cloned<I>
    where I: TrustedLen<Item=&'a T>,
          T: Clone
{}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that repeats endlessly.
///
/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`cycle`]: trait.Iterator.html#method.cycle
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Cycle<I> {
    // Pristine copy of the source iterator, cloned to restart each pass.
    orig: I,
    // The pass currently being iterated.
    iter: I,
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<I> Iterator for Cycle<I> where I: Clone + Iterator {
|
|
|
|
type Item = <I as Iterator>::Item;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<<I as Iterator>::Item> {
|
|
|
|
match self.iter.next() {
|
|
|
|
None => { self.iter = self.orig.clone(); self.iter.next() }
|
|
|
|
y => y
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
// the cycle iterator is either empty or infinite
|
|
|
|
match self.orig.size_hint() {
|
|
|
|
sz @ (0, Some(0)) => sz,
|
|
|
|
(0, _) => (0, None),
|
|
|
|
_ => (usize::MAX, None)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// `Cycle` only ever returns `None` when the source is empty, in which
// case every subsequent call returns `None` as well — so it is fused
// regardless of whether the source iterator is.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Cycle<I> where I: Clone + Iterator {}
|
|
|
|
|
2017-09-25 10:41:39 -05:00
|
|
|
/// An iterator for stepping iterators by a custom amount.
///
/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
/// its documentation for more.
///
/// [`step_by`]: trait.Iterator.html#method.step_by
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[unstable(feature = "iterator_step_by",
           reason = "unstable replacement of Range::step_by",
           issue = "27741")]
#[derive(Clone, Debug)]
pub struct StepBy<I> {
    iter: I,
    // Number of elements skipped between yields (fed to `Iterator::nth`);
    // presumably the user-facing step minus one — confirm in
    // `Iterator::step_by`'s constructor.
    step: usize,
    // True until the first element has been yielded; the first item is
    // returned without any skipping.
    first_take: bool,
}
|
|
|
|
|
|
|
|
#[unstable(feature = "iterator_step_by",
|
|
|
|
reason = "unstable replacement of Range::step_by",
|
|
|
|
issue = "27741")]
|
|
|
|
impl<I> Iterator for StepBy<I> where I: Iterator {
|
|
|
|
type Item = I::Item;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<Self::Item> {
|
|
|
|
if self.first_take {
|
|
|
|
self.first_take = false;
|
|
|
|
self.iter.next()
|
|
|
|
} else {
|
|
|
|
self.iter.nth(self.step)
|
|
|
|
}
|
|
|
|
}
|
2017-05-23 04:24:25 -05:00
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
let inner_hint = self.iter.size_hint();
|
|
|
|
|
|
|
|
if self.first_take {
|
|
|
|
let f = |n| if n == 0 { 0 } else { 1 + (n-1)/(self.step+1) };
|
|
|
|
(f(inner_hint.0), inner_hint.1.map(f))
|
|
|
|
} else {
|
|
|
|
let f = |n| n / (self.step+1);
|
|
|
|
(f(inner_hint.0), inner_hint.1.map(f))
|
|
|
|
}
|
|
|
|
}
|
2017-05-12 13:11:15 -05:00
|
|
|
}
|
|
|
|
|
2017-05-23 04:24:25 -05:00
|
|
|
// StepBy can only make the iterator shorter, so the len will still fit.
// (The default `len` is derived from `size_hint`, which divides the inner
// length by the step — never exceeding the inner, trusted, count.)
#[unstable(feature = "iterator_step_by",
           reason = "unstable replacement of Range::step_by",
           issue = "27741")]
impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that strings two iterators together.
///
/// This `struct` is created by the [`chain`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`chain`]: trait.Iterator.html#method.chain
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chain<A, B> {
    // First sub-iterator, drained (from the front) before `b`.
    a: A,
    // Second sub-iterator, drained after `a` is exhausted.
    b: B,
    // Tracks which of `a`/`b` may still yield elements; see `ChainState`.
    state: ChainState,
}
|
|
|
|
|
|
|
|
// The iterator protocol specifies that iteration ends with the return value
// `None` from `.next()` (or `.next_back()`) and it is unspecified what
// further calls return. The chain adaptor must account for this since it uses
// two subiterators.
//
// It uses three states:
//
// - Both: `a` and `b` are remaining
// - Front: `a` remaining
// - Back: `b` remaining
//
// The fourth state (neither iterator is remaining) only occurs after Chain has
// returned None once, so we don't need to store this state.
//
// See the `Iterator` methods on `Chain` for the state transitions.
#[derive(Clone, Debug)]
enum ChainState {
    // both front and back iterator are remaining
    Both,
    // only front is remaining
    Front,
    // only back is remaining
    Back,
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<A, B> Iterator for Chain<A, B> where
|
|
|
|
A: Iterator,
|
|
|
|
B: Iterator<Item = A::Item>
|
|
|
|
{
|
|
|
|
type Item = A::Item;
|
|
|
|
|
|
|
|
    #[inline]
    // Pull from `a` until it is exhausted, then permanently switch to `b`.
    fn next(&mut self) -> Option<A::Item> {
        match self.state {
            ChainState::Both => match self.a.next() {
                elt @ Some(..) => elt,
                None => {
                    // `a` just ran dry; record that only `b` remains so
                    // `a` is never polled again (its behavior after `None`
                    // is unspecified — see the `ChainState` comment).
                    self.state = ChainState::Back;
                    self.b.next()
                }
            },
            ChainState::Front => self.a.next(),
            ChainState::Back => self.b.next(),
        }
    }
|
|
|
|
|
|
|
|
    #[inline]
    // The sum of the two sub-counts can overflow `usize`; this attribute
    // makes the `+` obey the overflow-check setting of the calling crate.
    #[rustc_inherit_overflow_checks]
    fn count(self) -> usize {
        // Only count the halves that are still live in the current state.
        match self.state {
            ChainState::Both => self.a.count() + self.b.count(),
            ChainState::Front => self.a.count(),
            ChainState::Back => self.b.count(),
        }
    }
|
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
fn try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R where
|
|
|
|
Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let mut accum = init;
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both | ChainState::Front => {
|
|
|
|
accum = self.a.try_fold(accum, &mut f)?;
|
|
|
|
if let ChainState::Both = self.state {
|
|
|
|
self.state = ChainState::Back;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => { }
|
|
|
|
}
|
2017-11-05 00:52:45 -05:00
|
|
|
if let ChainState::Back = self.state {
|
|
|
|
accum = self.b.try_fold(accum, &mut f)?;
|
2017-10-23 00:47:27 -05:00
|
|
|
}
|
|
|
|
Try::from_ok(accum)
|
|
|
|
}
|
|
|
|
|
2016-10-25 08:21:49 -05:00
|
|
|
fn fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
|
|
|
|
where F: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut accum = init;
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both | ChainState::Front => {
|
|
|
|
accum = self.a.fold(accum, &mut f);
|
|
|
|
}
|
|
|
|
_ => { }
|
|
|
|
}
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both | ChainState::Back => {
|
|
|
|
accum = self.b.fold(accum, &mut f);
|
|
|
|
}
|
|
|
|
_ => { }
|
|
|
|
}
|
|
|
|
accum
|
|
|
|
}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn nth(&mut self, mut n: usize) -> Option<A::Item> {
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both | ChainState::Front => {
|
|
|
|
for x in self.a.by_ref() {
|
|
|
|
if n == 0 {
|
|
|
|
return Some(x)
|
|
|
|
}
|
|
|
|
n -= 1;
|
|
|
|
}
|
|
|
|
if let ChainState::Both = self.state {
|
|
|
|
self.state = ChainState::Back;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ChainState::Back => {}
|
|
|
|
}
|
|
|
|
if let ChainState::Back = self.state {
|
|
|
|
self.b.nth(n)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-30 04:16:30 -05:00
|
|
|
#[inline]
|
|
|
|
fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item> where
|
|
|
|
P: FnMut(&Self::Item) -> bool,
|
|
|
|
{
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both => match self.a.find(&mut predicate) {
|
|
|
|
None => {
|
|
|
|
self.state = ChainState::Back;
|
|
|
|
self.b.find(predicate)
|
|
|
|
}
|
|
|
|
v => v
|
|
|
|
},
|
|
|
|
ChainState::Front => self.a.find(predicate),
|
|
|
|
ChainState::Back => self.b.find(predicate),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn last(self) -> Option<A::Item> {
|
|
|
|
match self.state {
|
|
|
|
ChainState::Both => {
|
|
|
|
// Must exhaust a before b.
|
|
|
|
let a_last = self.a.last();
|
|
|
|
let b_last = self.b.last();
|
|
|
|
b_last.or(a_last)
|
|
|
|
},
|
|
|
|
ChainState::Front => self.a.last(),
|
|
|
|
ChainState::Back => self.b.last()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
let (a_lower, a_upper) = self.a.size_hint();
|
|
|
|
let (b_lower, b_upper) = self.b.size_hint();
|
|
|
|
|
|
|
|
let lower = a_lower.saturating_add(b_lower);
|
|
|
|
|
|
|
|
let upper = match (a_upper, b_upper) {
|
|
|
|
(Some(x), Some(y)) => x.checked_add(y),
|
|
|
|
_ => None
|
|
|
|
};
|
|
|
|
|
|
|
|
(lower, upper)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> DoubleEndedIterator for Chain<A, B> where
    A: DoubleEndedIterator,
    B: DoubleEndedIterator<Item=A::Item>,
{
    #[inline]
    fn next_back(&mut self) -> Option<A::Item> {
        // Mirror image of `next`: drain `b` from the back first; on its
        // first `None`, permanently switch to `a`.
        match self.state {
            ChainState::Both => match self.b.next_back() {
                elt @ Some(..) => elt,
                None => {
                    self.state = ChainState::Front;
                    self.a.next_back()
                }
            },
            ChainState::Front => self.a.next_back(),
            ChainState::Back => self.b.next_back(),
        }
    }

    fn try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R where
        Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        let mut accum = init;
        match self.state {
            ChainState::Both | ChainState::Back => {
                accum = self.b.try_rfold(accum, &mut f)?;
                // `b` finished without short-circuiting, so it is exhausted;
                // a later resumption must only visit `a`.
                if let ChainState::Both = self.state {
                    self.state = ChainState::Front;
                }
            }
            _ => { }
        }
        // Re-check the state: it may have just transitioned above.
        if let ChainState::Front = self.state {
            accum = self.a.try_rfold(accum, &mut f)?;
        }
        Try::from_ok(accum)
    }

    fn rfold<Acc, F>(self, init: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc,
    {
        // Consuming reverse fold: exhaust `b` from the back, then `a`,
        // forwarding to the sub-iterators' own `rfold`s.
        let mut accum = init;
        match self.state {
            ChainState::Both | ChainState::Back => {
                accum = self.b.rfold(accum, &mut f);
            }
            _ => { }
        }
        match self.state {
            ChainState::Both | ChainState::Front => {
                accum = self.a.rfold(accum, &mut f);
            }
            _ => { }
        }
        accum
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Note: *both* must be fused to handle double-ended iterators.
#[unstable(feature = "fused", issue = "35602")]
impl<A, B> FusedIterator for Chain<A, B>
    where A: FusedIterator,
          B: FusedIterator<Item=A::Item>,
{}
|
|
|
|
|
2016-11-03 18:24:59 -05:00
|
|
|
// Chaining preserves an exactly-known length: `size_hint` adds the two
// bounds with `checked_add`, reporting `None` (more than `usize::MAX`
// elements) only on overflow, which `TrustedLen` permits.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A, B> TrustedLen for Chain<A, B>
    where A: TrustedLen, B: TrustedLen<Item=A::Item>,
{}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that iterates two other iterators simultaneously.
///
/// This `struct` is created by the [`zip`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`zip`]: trait.Iterator.html#method.zip
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Zip<A, B> {
    a: A,
    b: B,
    // index and len are only used by the specialized version of zip
    // (the `TrustedRandomAccess` `ZipImpl`); the general version stores
    // zeros here and never reads them.
    index: usize,
    len: usize,
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> Iterator for Zip<A, B> where A: Iterator, B: Iterator
{
    type Item = (A::Item, B::Item);

    // Both methods delegate to `ZipImpl`, which dispatches between the
    // general implementation and the `TrustedRandomAccess` specialization.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        ZipImpl::next(self)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        ZipImpl::size_hint(self)
    }
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> DoubleEndedIterator for Zip<A, B> where
    A: DoubleEndedIterator + ExactSizeIterator,
    B: DoubleEndedIterator + ExactSizeIterator,
{
    // `ExactSizeIterator` is required so the implementation can trim the
    // longer side before pairing elements from the back.
    #[inline]
    fn next_back(&mut self) -> Option<(A::Item, B::Item)> {
        ZipImpl::next_back(self)
    }
}
|
|
|
|
|
|
|
|
// Zip specialization trait
#[doc(hidden)]
trait ZipImpl<A, B> {
    type Item;
    fn new(a: A, b: B) -> Self;
    fn next(&mut self) -> Option<Self::Item>;
    fn size_hint(&self) -> (usize, Option<usize>);
    // The double-ended bounds live on this method rather than the trait so
    // the trait can be implemented for all iterator pairs.
    fn next_back(&mut self) -> Option<Self::Item>
        where A: DoubleEndedIterator + ExactSizeIterator,
              B: DoubleEndedIterator + ExactSizeIterator;
}
|
|
|
|
|
|
|
|
// General Zip impl
#[doc(hidden)]
impl<A, B> ZipImpl<A, B> for Zip<A, B>
    where A: Iterator, B: Iterator
{
    type Item = (A::Item, B::Item);
    default fn new(a: A, b: B) -> Self {
        Zip {
            a,
            b,
            index: 0, // unused
            len: 0, // unused
        }
    }

    #[inline]
    default fn next(&mut self) -> Option<(A::Item, B::Item)> {
        // `a` is advanced first; when `a` yields but `b` is empty, the
        // element pulled from `a` is dropped. The specialized version must
        // reproduce these side effects exactly.
        self.a.next().and_then(|x| {
            self.b.next().and_then(|y| {
                Some((x, y))
            })
        })
    }

    #[inline]
    default fn next_back(&mut self) -> Option<(A::Item, B::Item)>
        where A: DoubleEndedIterator + ExactSizeIterator,
              B: DoubleEndedIterator + ExactSizeIterator
    {
        // Trim the longer iterator from the back so both sides have equal
        // length before pairing elements from the back.
        let a_sz = self.a.len();
        let b_sz = self.b.len();
        if a_sz != b_sz {
            // Adjust a, b to equal length
            if a_sz > b_sz {
                for _ in 0..a_sz - b_sz { self.a.next_back(); }
            } else {
                for _ in 0..b_sz - a_sz { self.b.next_back(); }
            }
        }
        match (self.a.next_back(), self.b.next_back()) {
            (Some(x), Some(y)) => Some((x, y)),
            (None, None) => None,
            // lengths were equalized above, so a mixed result is impossible
            _ => unreachable!(),
        }
    }

    #[inline]
    default fn size_hint(&self) -> (usize, Option<usize>) {
        // Zip stops at the shorter iterator, so both bounds take the
        // minimum of the two sides (a known upper bound wins over `None`).
        let (a_lower, a_upper) = self.a.size_hint();
        let (b_lower, b_upper) = self.b.size_hint();

        let lower = cmp::min(a_lower, b_lower);

        let upper = match (a_upper, b_upper) {
            (Some(x), Some(y)) => Some(cmp::min(x,y)),
            (Some(x), None) => Some(x),
            (None, Some(y)) => Some(y),
            (None, None) => None
        };

        (lower, upper)
    }
}
|
|
|
|
|
|
|
|
// Specialized Zip impl: when both sides support unchecked random access,
// iterate by index instead of calling `next` on each side.
#[doc(hidden)]
impl<A, B> ZipImpl<A, B> for Zip<A, B>
    where A: TrustedRandomAccess, B: TrustedRandomAccess
{
    fn new(a: A, b: B) -> Self {
        // The number of pairs is fixed up front from the shorter side.
        let len = cmp::min(a.len(), b.len());
        Zip {
            a,
            b,
            index: 0,
            len,
        }
    }

    #[inline]
    fn next(&mut self) -> Option<(A::Item, B::Item)> {
        if self.index < self.len {
            let i = self.index;
            self.index += 1;
            unsafe {
                Some((self.a.get_unchecked(i), self.b.get_unchecked(i)))
            }
        } else if A::may_have_side_effect() && self.index < self.a.len() {
            // match the base implementation's potential side effects
            // (the general `next` pulls one extra element from `a` when `b`
            // is the shorter side)
            unsafe {
                self.a.get_unchecked(self.index);
            }
            self.index += 1;
            None
        } else {
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Remaining pairs are known exactly.
        let len = self.len - self.index;
        (len, Some(len))
    }

    #[inline]
    fn next_back(&mut self) -> Option<(A::Item, B::Item)>
        where A: DoubleEndedIterator + ExactSizeIterator,
              B: DoubleEndedIterator + ExactSizeIterator
    {
        // Adjust a, b to equal length, but only by actually consuming
        // elements when the side may have observable side effects --
        // matching what the general implementation would have done.
        if A::may_have_side_effect() {
            let sz = self.a.len();
            if sz > self.len {
                for _ in 0..sz - cmp::max(self.len, self.index) {
                    self.a.next_back();
                }
            }
        }
        if B::may_have_side_effect() {
            let sz = self.b.len();
            if sz > self.len {
                for _ in 0..sz - self.len {
                    self.b.next_back();
                }
            }
        }
        if self.index < self.len {
            self.len -= 1;
            let i = self.len;
            unsafe {
                Some((self.a.get_unchecked(i), self.b.get_unchecked(i)))
            }
        } else {
            None
        }
    }
}
|
|
|
|
|
2016-04-18 16:44:02 -05:00
|
|
|
// Zip's length is exactly the minimum of the two sides' lengths (see
// `size_hint` in the `ZipImpl` impls), so exactness is preserved.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> ExactSizeIterator for Zip<A, B>
    where A: ExactSizeIterator, B: ExactSizeIterator {}
|
|
|
|
|
2016-04-21 14:35:39 -05:00
|
|
|
#[doc(hidden)]
unsafe impl<A, B> TrustedRandomAccess for Zip<A, B>
    where A: TrustedRandomAccess,
          B: TrustedRandomAccess,
{
    unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) {
        (self.a.get_unchecked(i), self.b.get_unchecked(i))
    }

    fn may_have_side_effect() -> bool {
        // A zipped access touches both sides, so their side effects combine.
        A::may_have_side_effect() || B::may_have_side_effect()
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
#[unstable(feature = "fused", issue = "35602")]
impl<A, B> FusedIterator for Zip<A, B>
    where A: FusedIterator, B: FusedIterator, {}
|
|
|
|
|
2016-11-03 18:24:59 -05:00
|
|
|
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A, B> TrustedLen for Zip<A, B>
    where A: TrustedLen, B: TrustedLen,
{}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that maps the values of `iter` with `f`.
///
/// This `struct` is created by the [`map`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`map`]: trait.Iterator.html#method.map
/// [`Iterator`]: trait.Iterator.html
///
/// # Notes about side effects
///
/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that
/// you can also [`map`] backwards:
///
/// ```rust
/// let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect();
///
/// assert_eq!(v, [4, 3, 2]);
/// ```
///
/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html
///
/// But if your closure has state, iterating backwards may act in a way you do
/// not expect. Let's go through an example. First, in the forward direction:
///
/// ```rust
/// let mut c = 0;
///
/// for pair in vec!['a', 'b', 'c'].into_iter()
///                                .map(|letter| { c += 1; (letter, c) }) {
///     println!("{:?}", pair);
/// }
/// ```
///
/// This will print "('a', 1), ('b', 2), ('c', 3)".
///
/// Now consider this twist where we add a call to `rev`. This version will
/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed,
/// but the values of the counter still go in order. This is because `map()` is
/// still being called lazily on each item, but we are popping items off the
/// back of the vector now, instead of shifting them from the front.
///
/// ```rust
/// let mut c = 0;
///
/// for pair in vec!['a', 'b', 'c'].into_iter()
///                                .map(|letter| { c += 1; (letter, c) })
///                                .rev() {
///     println!("{:?}", pair);
/// }
/// ```
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Map<I, F> {
    // Underlying iterator supplying the values to be mapped.
    iter: I,
    // Closure applied to each element as it is produced.
    f: F,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, F> fmt::Debug for Map<I, F> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("Map")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<B, I: Iterator, F> Iterator for Map<I, F> where F: FnMut(I::Item) -> B {
|
|
|
|
type Item = B;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<B> {
|
|
|
|
self.iter.next().map(&mut self.f)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
self.iter.size_hint()
|
|
|
|
}
|
2016-10-25 08:50:52 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
fn try_fold<Acc, G, R>(&mut self, init: Acc, mut g: G) -> R where
|
|
|
|
Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let f = &mut self.f;
|
|
|
|
self.iter.try_fold(init, move |acc, elt| g(acc, f(elt)))
|
|
|
|
}
|
|
|
|
|
2016-10-25 08:50:52 -05:00
|
|
|
fn fold<Acc, G>(self, init: Acc, mut g: G) -> Acc
|
|
|
|
where G: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut f = self.f;
|
|
|
|
self.iter.fold(init, move |acc, elt| g(acc, f(elt)))
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for Map<I, F> where
    F: FnMut(I::Item) -> B,
{
    #[inline]
    fn next_back(&mut self) -> Option<B> {
        self.iter.next_back().map(&mut self.f)
    }

    // The reverse folds forward to the inner iterator's implementations,
    // applying the mapping closure before the caller's folding function.
    fn try_rfold<Acc, G, R>(&mut self, init: Acc, mut g: G) -> R where
        Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        let f = &mut self.f;
        self.iter.try_rfold(init, move |acc, elt| g(acc, f(elt)))
    }

    fn rfold<Acc, G>(self, init: Acc, mut g: G) -> Acc
        where G: FnMut(Acc, Self::Item) -> Acc,
    {
        let mut f = self.f;
        self.iter.rfold(init, move |acc, elt| g(acc, f(elt)))
    }
}
|
|
|
|
|
2016-04-18 16:44:02 -05:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: ExactSizeIterator, F> ExactSizeIterator for Map<I, F>
    where F: FnMut(I::Item) -> B
{
    // Mapping is 1:1, so length queries delegate directly to the inner
    // iterator.
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
|
2016-04-18 16:44:02 -05:00
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
#[unstable(feature = "fused", issue = "35602")]
impl<B, I: FusedIterator, F> FusedIterator for Map<I, F>
    where F: FnMut(I::Item) -> B {}
|
|
|
|
|
2016-11-03 18:24:59 -05:00
|
|
|
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<B, I, F> TrustedLen for Map<I, F>
    where I: TrustedLen,
          F: FnMut(I::Item) -> B {}
|
|
|
|
|
2016-10-17 03:58:21 -05:00
|
|
|
#[doc(hidden)]
unsafe impl<B, I, F> TrustedRandomAccess for Map<I, F>
    where I: TrustedRandomAccess,
          F: FnMut(I::Item) -> B,
{
    unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
        (self.f)(self.iter.get_unchecked(i))
    }
    // Every access runs the arbitrary mapping closure, which may have
    // side effects.
    #[inline]
    fn may_have_side_effect() -> bool { true }
}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that filters the elements of `iter` with `predicate`.
///
/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`filter`]: trait.Iterator.html#method.filter
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Filter<I, P> {
    // Underlying iterator whose elements are tested.
    iter: I,
    // Predicate deciding which elements pass through.
    predicate: P,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("Filter")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<I: Iterator, P> Iterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {
|
|
|
|
type Item = I::Item;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<I::Item> {
|
2017-01-17 06:18:16 -06:00
|
|
|
for x in &mut self.iter {
|
2016-04-18 13:08:27 -05:00
|
|
|
if (self.predicate)(&x) {
|
|
|
|
return Some(x);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
let (_, upper) = self.iter.size_hint();
|
|
|
|
(0, upper) // can't know a lower bound, due to the predicate
|
|
|
|
}
|
2017-01-16 15:28:58 -06:00
|
|
|
|
2017-01-24 02:46:01 -06:00
|
|
|
// this special case allows the compiler to make `.filter(_).count()`
|
|
|
|
// branchless. Barring perfect branch prediction (which is unattainable in
|
|
|
|
// the general case), this will be much faster in >90% of cases (containing
|
|
|
|
// virtually all real workloads) and only a tiny bit slower in the rest.
|
|
|
|
//
|
|
|
|
// Having this specialization thus allows us to write `.filter(p).count()`
|
|
|
|
// where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is
|
|
|
|
// less readable and also less backwards-compatible to Rust before 1.10.
|
|
|
|
//
|
|
|
|
// Using the branchless version will also simplify the LLVM byte code, thus
|
|
|
|
// leaving more budget for LLVM optimizations.
|
2017-01-16 15:28:58 -06:00
|
|
|
#[inline]
|
2017-01-17 06:18:16 -06:00
|
|
|
fn count(mut self) -> usize {
|
|
|
|
let mut count = 0;
|
|
|
|
for x in &mut self.iter {
|
|
|
|
count += (self.predicate)(&x) as usize;
|
2017-01-16 15:28:58 -06:00
|
|
|
}
|
2017-01-17 06:18:16 -06:00
|
|
|
count
|
2017-01-16 15:28:58 -06:00
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let predicate = &mut self.predicate;
|
|
|
|
self.iter.try_fold(init, move |acc, item| if predicate(&item) {
|
|
|
|
fold(acc, item)
|
|
|
|
} else {
|
|
|
|
Try::from_ok(acc)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut predicate = self.predicate;
|
|
|
|
self.iter.fold(init, move |acc, item| if predicate(&item) {
|
|
|
|
fold(acc, item)
|
|
|
|
} else {
|
|
|
|
acc
|
|
|
|
})
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<I: DoubleEndedIterator, P> DoubleEndedIterator for Filter<I, P>
|
|
|
|
where P: FnMut(&I::Item) -> bool,
|
|
|
|
{
|
|
|
|
#[inline]
|
|
|
|
fn next_back(&mut self) -> Option<I::Item> {
|
|
|
|
for x in self.iter.by_ref().rev() {
|
|
|
|
if (self.predicate)(&x) {
|
|
|
|
return Some(x);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let predicate = &mut self.predicate;
|
|
|
|
self.iter.try_rfold(init, move |acc, item| if predicate(&item) {
|
|
|
|
fold(acc, item)
|
|
|
|
} else {
|
|
|
|
Try::from_ok(acc)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut predicate = self.predicate;
|
|
|
|
self.iter.rfold(init, move |acc, item| if predicate(&item) {
|
|
|
|
fold(acc, item)
|
|
|
|
} else {
|
|
|
|
acc
|
|
|
|
})
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
#[unstable(feature = "fused", issue = "35602")]
|
|
|
|
impl<I: FusedIterator, P> FusedIterator for Filter<I, P>
|
|
|
|
where P: FnMut(&I::Item) -> bool {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that uses `f` to both filter and map elements from `iter`.
|
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its
|
2016-04-18 13:08:27 -05:00
|
|
|
/// documentation for more.
|
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// [`filter_map`]: trait.Iterator.html#method.filter_map
|
2016-04-18 13:08:27 -05:00
|
|
|
/// [`Iterator`]: trait.Iterator.html
|
|
|
|
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
#[derive(Clone)]
|
|
|
|
pub struct FilterMap<I, F> {
|
|
|
|
iter: I,
|
|
|
|
f: F,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, F> fmt::Debug for FilterMap<I, F> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("FilterMap")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<B, I: Iterator, F> Iterator for FilterMap<I, F>
|
|
|
|
where F: FnMut(I::Item) -> Option<B>,
|
|
|
|
{
|
|
|
|
type Item = B;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<B> {
|
|
|
|
for x in self.iter.by_ref() {
|
|
|
|
if let Some(y) = (self.f)(x) {
|
|
|
|
return Some(y);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
let (_, upper) = self.iter.size_hint();
|
|
|
|
(0, upper) // can't know a lower bound, due to the predicate
|
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let f = &mut self.f;
|
|
|
|
self.iter.try_fold(init, move |acc, item| match f(item) {
|
|
|
|
Some(x) => fold(acc, x),
|
|
|
|
None => Try::from_ok(acc),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut f = self.f;
|
|
|
|
self.iter.fold(init, move |acc, item| match f(item) {
|
|
|
|
Some(x) => fold(acc, x),
|
|
|
|
None => acc,
|
|
|
|
})
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for FilterMap<I, F>
|
|
|
|
where F: FnMut(I::Item) -> Option<B>,
|
|
|
|
{
|
|
|
|
#[inline]
|
|
|
|
fn next_back(&mut self) -> Option<B> {
|
|
|
|
for x in self.iter.by_ref().rev() {
|
|
|
|
if let Some(y) = (self.f)(x) {
|
|
|
|
return Some(y);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let f = &mut self.f;
|
|
|
|
self.iter.try_rfold(init, move |acc, item| match f(item) {
|
|
|
|
Some(x) => fold(acc, x),
|
|
|
|
None => Try::from_ok(acc),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut f = self.f;
|
|
|
|
self.iter.rfold(init, move |acc, item| match f(item) {
|
|
|
|
Some(x) => fold(acc, x),
|
|
|
|
None => acc,
|
|
|
|
})
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
#[unstable(feature = "fused", issue = "35602")]
|
|
|
|
impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F>
|
|
|
|
where F: FnMut(I::Item) -> Option<B> {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that yields the current count and the element during iteration.
|
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its
|
2016-04-18 13:08:27 -05:00
|
|
|
/// documentation for more.
|
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// [`enumerate`]: trait.Iterator.html#method.enumerate
|
2016-04-18 13:08:27 -05:00
|
|
|
/// [`Iterator`]: trait.Iterator.html
|
|
|
|
#[derive(Clone, Debug)]
|
|
|
|
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
pub struct Enumerate<I> {
|
|
|
|
iter: I,
|
|
|
|
count: usize,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Enumerate<I> where I: Iterator {
    type Item = (usize, <I as Iterator>::Item);

    /// # Overflow Behavior
    ///
    /// The method does no guarding against overflows, so enumerating more than
    /// `usize::MAX` elements either produces the wrong result or panics. If
    /// debug assertions are enabled, a panic is guaranteed.
    ///
    /// # Panics
    ///
    /// Might panic if the index of the element overflows a `usize`.
    #[inline]
    #[rustc_inherit_overflow_checks]
    fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
        self.iter.next().map(|a| {
            let ret = (self.count, a);
            // Possible overflow: silently wraps unless overflow checks are
            // active, which `#[rustc_inherit_overflow_checks]` above makes
            // follow the calling crate's setting.
            self.count += 1;
            ret
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Pairing each element with an index adds/removes nothing, so the
        // inner hint is exact for us too.
        self.iter.size_hint()
    }

    #[inline]
    #[rustc_inherit_overflow_checks]
    fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
        self.iter.nth(n).map(|a| {
            // The `n` skipped elements still advance the index: the element
            // returned sits at `count + n`, and the counter resumes past it.
            let i = self.count + n;
            self.count = i + 1;
            (i, a)
        })
    }

    #[inline]
    fn count(self) -> usize {
        // No need to track indices just to count the remaining elements.
        self.iter.count()
    }

    #[inline]
    #[rustc_inherit_overflow_checks]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Borrow the counter (rather than copying it) so that on an early
        // exit the consumed elements remain accounted for in `self.count`.
        let count = &mut self.count;
        self.iter.try_fold(init, move |acc, item| {
            let acc = fold(acc, (*count, item));
            *count += 1;
            acc
        })
    }

    #[inline]
    #[rustc_inherit_overflow_checks]
    fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // `fold` consumes `self`, so a moved copy of the counter suffices.
        let mut count = self.count;
        self.iter.fold(init, move |acc, item| {
            let acc = fold(acc, (count, item));
            count += 1;
            acc
        })
    }
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> DoubleEndedIterator for Enumerate<I> where
    I: ExactSizeIterator + DoubleEndedIterator
{
    #[inline]
    fn next_back(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
        self.iter.next_back().map(|a| {
            // `len()` is taken *after* `next_back()`, so it counts the
            // elements still in front of the one just removed.
            let len = self.iter.len();
            // Can safely add, `ExactSizeIterator` promises that the number of
            // elements fits into a `usize`.
            (self.count + len, a)
        })
    }

    #[inline]
    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Can safely add and subtract the count, as `ExactSizeIterator` promises
        // that the number of elements fits into a `usize`.
        let mut count = self.count + self.iter.len();
        self.iter.try_rfold(init, move |acc, item| {
            // Decrement before folding: the rearmost element's index is
            // one-past-the-end minus one.
            count -= 1;
            fold(acc, (count, item))
        })
    }

    #[inline]
    fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Can safely add and subtract the count, as `ExactSizeIterator` promises
        // that the number of elements fits into a `usize`.
        let mut count = self.count + self.iter.len();
        self.iter.rfold(init, move |acc, item| {
            count -= 1;
            fold(acc, (count, item))
        })
    }
}
|
|
|
|
|
2016-04-18 16:44:02 -05:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
2016-11-22 16:48:33 -06:00
|
|
|
impl<I> ExactSizeIterator for Enumerate<I> where I: ExactSizeIterator {
|
|
|
|
fn len(&self) -> usize {
|
|
|
|
self.iter.len()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn is_empty(&self) -> bool {
|
|
|
|
self.iter.is_empty()
|
|
|
|
}
|
|
|
|
}
|
2016-04-18 16:44:02 -05:00
|
|
|
|
2016-04-21 14:35:39 -05:00
|
|
|
#[doc(hidden)]
unsafe impl<I> TrustedRandomAccess for Enumerate<I>
    where I: TrustedRandomAccess
{
    // Offsets the requested slot by the running count so the yielded index
    // matches what `next` would have produced after `count` steps.
    // Caller must uphold `TrustedRandomAccess`'s in-bounds contract for `i`.
    unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) {
        (self.count + i, self.iter.get_unchecked(i))
    }

    fn may_have_side_effect() -> bool {
        // The enumeration wrapper adds no effects of its own; defer to `I`.
        I::may_have_side_effect()
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// `Enumerate` only wraps the inner iterator's items, so fusedness carries over.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
|
|
|
|
|
2016-11-03 18:24:59 -05:00
|
|
|
// Enumeration preserves the element count exactly, so the inner iterator's
// trusted length guarantee holds for the wrapper as well.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Enumerate<I>
    where I: TrustedLen,
{}
|
|
|
|
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator with a `peek()` that returns an optional reference to the next
/// element.
///
/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`peekable`]: trait.Iterator.html#method.peekable
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Peekable<I: Iterator> {
    iter: I,
    /// Remember a peeked value, even if it was None: `Some(None)` records
    /// that the inner iterator was already observed to be exhausted.
    peeked: Option<Option<I::Item>>,
}
|
|
|
|
|
2016-11-17 11:27:37 -06:00
|
|
|
// Peekable must remember if a None has been seen in the `.peek()` method.
|
|
|
|
// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the
|
|
|
|
// underlying iterator at most once. This does not by itself make the iterator
|
|
|
|
// fused.
|
2016-04-18 13:08:27 -05:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator> Iterator for Peekable<I> {
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        // Drain the stashed value first (including a stashed `None`);
        // only consult the inner iterator when nothing was peeked.
        match self.peeked.take() {
            Some(v) => v,
            None => self.iter.next(),
        }
    }

    #[inline]
    #[rustc_inherit_overflow_checks]
    fn count(mut self) -> usize {
        // A stashed element contributes 1 (the `1 +` below is what the
        // overflow-checks attribute guards); a stashed `None` means empty.
        match self.peeked.take() {
            Some(None) => 0,
            Some(Some(_)) => 1 + self.iter.count(),
            None => self.iter.count(),
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<I::Item> {
        // FIXME(#6393): merge these when borrow-checking gets better.
        if n == 0 {
            // Element 0 is the stashed one, if any.
            match self.peeked.take() {
                Some(v) => v,
                None => self.iter.nth(n),
            }
        } else {
            // A stashed element consumes one step of `n`; a stashed `None`
            // means iteration already ended.
            match self.peeked.take() {
                Some(None) => None,
                Some(Some(_)) => self.iter.nth(n - 1),
                None => self.iter.nth(n),
            }
        }
    }

    #[inline]
    fn last(mut self) -> Option<I::Item> {
        // The stashed element is the last one only when the inner iterator
        // yields nothing further, hence the `.or(peek_opt)` below.
        let peek_opt = match self.peeked.take() {
            Some(None) => return None,
            Some(v) => v,
            None => None,
        };
        self.iter.last().or(peek_opt)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // A recorded end (`Some(None)`) pins the hint to exactly zero.
        let peek_len = match self.peeked {
            Some(None) => return (0, Some(0)),
            Some(Some(_)) => 1,
            None => 0,
        };
        let (lo, hi) = self.iter.size_hint();
        // Saturate/check the +1 so the hint never overflows a `usize`.
        let lo = lo.saturating_add(peek_len);
        let hi = hi.and_then(|x| x.checked_add(peek_len));
        (lo, hi)
    }

    #[inline]
    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where
        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
    {
        // Fold the stashed element first (with `?` so a short-circuit from it
        // propagates), then hand the rest to the inner iterator's `try_fold`.
        let acc = match self.peeked.take() {
            Some(None) => return Try::from_ok(init),
            Some(Some(v)) => f(init, v)?,
            None => init,
        };
        self.iter.try_fold(acc, f)
    }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Same shape as `try_fold`, but infallible and consuming `self`.
        let acc = match self.peeked {
            Some(None) => return init,
            Some(Some(v)) => fold(init, v),
            None => init,
        };
        self.iter.fold(acc, fold)
    }
}
|
|
|
|
|
|
|
|
// Relies on the default `len`, which derives from `size_hint`; Peekable's
// `size_hint` already accounts for the stashed peeked slot.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator> ExactSizeIterator for Peekable<I> {}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Once both the stash and a fused inner iterator are exhausted, `next`
// keeps returning `None`, so the wrapper is fused too.
#[unstable(feature = "fused", issue = "35602")]
impl<I: FusedIterator> FusedIterator for Peekable<I> {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
impl<I: Iterator> Peekable<I> {
|
|
|
|
/// Returns a reference to the next() value without advancing the iterator.
|
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// Like [`next`], if there is a value, it is wrapped in a `Some(T)`.
|
2016-04-28 16:51:42 -05:00
|
|
|
/// But if the iteration is over, `None` is returned.
|
2016-04-18 13:08:27 -05:00
|
|
|
///
|
2017-03-12 13:04:52 -05:00
|
|
|
/// [`next`]: trait.Iterator.html#tymethod.next
|
2016-04-18 13:08:27 -05:00
|
|
|
///
|
2016-04-28 16:51:42 -05:00
|
|
|
/// Because `peek()` returns a reference, and many iterators iterate over
|
|
|
|
/// references, there can be a possibly confusing situation where the
|
2016-04-18 13:08:27 -05:00
|
|
|
/// return value is a double reference. You can see this effect in the
|
2016-04-28 16:51:42 -05:00
|
|
|
/// examples below.
|
2016-04-18 13:08:27 -05:00
|
|
|
///
|
|
|
|
/// # Examples
|
|
|
|
///
|
|
|
|
/// Basic usage:
|
|
|
|
///
|
|
|
|
/// ```
|
|
|
|
/// let xs = [1, 2, 3];
|
|
|
|
///
|
|
|
|
/// let mut iter = xs.iter().peekable();
|
|
|
|
///
|
|
|
|
/// // peek() lets us see into the future
|
|
|
|
/// assert_eq!(iter.peek(), Some(&&1));
|
|
|
|
/// assert_eq!(iter.next(), Some(&1));
|
|
|
|
///
|
|
|
|
/// assert_eq!(iter.next(), Some(&2));
|
|
|
|
///
|
2016-04-28 16:51:42 -05:00
|
|
|
/// // The iterator does not advance even if we `peek` multiple times
|
2016-04-18 13:08:27 -05:00
|
|
|
/// assert_eq!(iter.peek(), Some(&&3));
|
|
|
|
/// assert_eq!(iter.peek(), Some(&&3));
|
|
|
|
///
|
|
|
|
/// assert_eq!(iter.next(), Some(&3));
|
|
|
|
///
|
2016-04-28 16:51:42 -05:00
|
|
|
/// // After the iterator is finished, so is `peek()`
|
2016-04-18 13:08:27 -05:00
|
|
|
/// assert_eq!(iter.peek(), None);
|
|
|
|
/// assert_eq!(iter.next(), None);
|
|
|
|
/// ```
|
|
|
|
#[inline]
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
pub fn peek(&mut self) -> Option<&I::Item> {
|
|
|
|
if self.peeked.is_none() {
|
2016-11-17 11:27:37 -06:00
|
|
|
self.peeked = Some(self.iter.next());
|
|
|
|
}
|
|
|
|
match self.peeked {
|
|
|
|
Some(Some(ref value)) => Some(value),
|
|
|
|
Some(None) => None,
|
|
|
|
_ => unreachable!(),
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// An iterator that rejects elements while `predicate` is true.
///
/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`skip_while`]: trait.Iterator.html#method.skip_while
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct SkipWhile<I, P> {
    iter: I,
    // `true` once an element has failed `predicate`; from then on every
    // element of `iter` is passed through unchanged.
    flag: bool,
    predicate: P,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, P> fmt::Debug for SkipWhile<I, P> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("SkipWhile")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.field("flag", &self.flag)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator, P> Iterator for SkipWhile<I, P>
    where P: FnMut(&I::Item) -> bool
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        // Drive the skipping phase through the inner iterator's `find`, so
        // inner iterators with an optimized `find`/`try_fold` benefit. The
        // closure reports "found" for every element once skipping is over:
        // either `flag` was already set, or this element is the first one to
        // fail `pred` (which latches `flag`).
        let flag = &mut self.flag;
        let pred = &mut self.predicate;
        self.iter.find(move |x| {
            if *flag || !pred(x) {
                *flag = true;
                true
            } else {
                false
            }
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (_, upper) = self.iter.size_hint();
        (0, upper) // can't know a lower bound, due to the predicate
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // The first `next()` call consumes the entire skipped prefix and
        // yields the first kept element (if any); after it returns, `flag`
        // is set, so the remainder can be forwarded to the inner `try_fold`.
        if !self.flag {
            match self.next() {
                Some(v) => init = fold(init, v)?,
                None => return Try::from_ok(init),
            }
        }
        self.iter.try_fold(init, fold)
    }

    #[inline]
    fn fold<Acc, Fold>(mut self, mut init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Same strategy as `try_fold`: one `next()` to get past the skipped
        // prefix, then delegate to the inner iterator's `fold`.
        if !self.flag {
            match self.next() {
                Some(v) => init = fold(init, v),
                None => return init,
            }
        }
        self.iter.fold(init, fold)
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// After the skipping phase, `SkipWhile` simply forwards to its inner
// iterator, so it is fused whenever `I` is.
#[unstable(feature = "fused", issue = "35602")]
impl<I, P> FusedIterator for SkipWhile<I, P>
    where I: FusedIterator, P: FnMut(&I::Item) -> bool {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that only accepts elements while `predicate` is true.
///
/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`take_while`]: trait.Iterator.html#method.take_while
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct TakeWhile<I, P> {
    iter: I,
    // `true` once an element has failed `predicate`; the iterator then
    // reports `None` forever without touching `iter` again.
    flag: bool,
    predicate: P,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, P> fmt::Debug for TakeWhile<I, P> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("TakeWhile")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.field("flag", &self.flag)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator, P> Iterator for TakeWhile<I, P>
    where P: FnMut(&I::Item) -> bool
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        if self.flag {
            // A previous element already failed the predicate; stay done.
            None
        } else {
            self.iter.next().and_then(|x| {
                if (self.predicate)(&x) {
                    Some(x)
                } else {
                    // First failing element: latch `flag` and drop it.
                    self.flag = true;
                    None
                }
            })
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (_, upper) = self.iter.size_hint();
        (0, upper) // can't know a lower bound, due to the predicate
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        if self.flag {
            Try::from_ok(init)
        } else {
            // Forward to the inner `try_fold`, breaking out at the first
            // element that fails the predicate. `LoopState` lets both a
            // predicate failure (a normal stop, yielding the accumulator)
            // and a short-circuit of the caller's `fold` end the inner fold.
            let flag = &mut self.flag;
            let p = &mut self.predicate;
            self.iter.try_fold(init, move |acc, x|{
                if p(&x) {
                    LoopState::from_try(fold(acc, x))
                } else {
                    *flag = true;
                    LoopState::Break(Try::from_ok(acc))
                }
            }).into_try()
        }
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Once `flag` is set, `TakeWhile` returns `None` without touching `I` at
// all; before that it mirrors `I`, so fusedness carries over.
#[unstable(feature = "fused", issue = "35602")]
impl<I, P> FusedIterator for TakeWhile<I, P>
    where I: FusedIterator, P: FnMut(&I::Item) -> bool {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that skips over `n` elements of `iter`.
///
/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`skip`]: trait.Iterator.html#method.skip
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Skip<I> {
    iter: I,
    // Number of leading elements still to be skipped; reset to 0 once the
    // skip has actually been performed.
    n: usize
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Skip<I> where I: Iterator {
    type Item = <I as Iterator>::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        if self.n == 0 {
            self.iter.next()
        } else {
            // First call: jump over the prefix with a single `nth` (which
            // the inner iterator may have optimized) and return what follows.
            let old_n = self.n;
            self.n = 0;
            self.iter.nth(old_n)
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<I::Item> {
        // Can't just add n + self.n due to overflow.
        if self.n == 0 {
            self.iter.nth(n)
        } else {
            // Skip the pending prefix first, then perform the requested
            // `nth` on what remains.
            let to_skip = self.n;
            self.n = 0;
            // nth(n) skips n+1
            if self.iter.nth(to_skip-1).is_none() {
                return None;
            }
            self.iter.nth(n)
        }
    }

    #[inline]
    fn count(self) -> usize {
        // The skipped prefix may be shorter than `n` if the inner iterator
        // runs out early, hence the saturating subtraction.
        self.iter.count().saturating_sub(self.n)
    }

    #[inline]
    fn last(mut self) -> Option<I::Item> {
        if self.n == 0 {
            self.iter.last()
        } else {
            // `next()` performs the skip. If anything remains, recurse
            // (with `n` now 0) and fall back to this element when the tail
            // turns out to be empty.
            let next = self.next();
            if next.is_some() {
                // recurse. n should be 0.
                self.last().or(next)
            } else {
                None
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (lower, upper) = self.iter.size_hint();

        // Both bounds shrink by the pending skip, saturating at zero.
        let lower = lower.saturating_sub(self.n);
        let upper = upper.map(|x| x.saturating_sub(self.n));

        (lower, upper)
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Skip the prefix with one `nth` call, then forward to the inner
        // iterator's `try_fold`.
        let n = self.n;
        self.n = 0;
        if n > 0 {
            // nth(n) skips n+1
            if self.iter.nth(n - 1).is_none() {
                return Try::from_ok(init);
            }
        }
        self.iter.try_fold(init, fold)
    }

    #[inline]
    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Same as `try_fold` above: one `nth` for the prefix, then delegate
        // to the inner iterator's `fold`.
        if self.n > 0 {
            // nth(n) skips n+1
            if self.iter.nth(self.n - 1).is_none() {
                return init;
            }
        }
        self.iter.fold(init, fold)
    }
}
|
|
|
|
|
|
|
|
// `Skip`'s `size_hint` subtracts the pending skip count from both bounds,
// so the reported length stays exact whenever the inner iterator's is.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {}
|
|
|
|
|
2017-05-20 02:38:39 -05:00
|
|
|
#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")]
impl<I> DoubleEndedIterator for Skip<I> where I: DoubleEndedIterator + ExactSizeIterator {
    fn next_back(&mut self) -> Option<Self::Item> {
        // `len()` already excludes the skipped prefix (via `size_hint`), so
        // elements may be taken from the back only while some non-skipped
        // element remains; the skipped prefix itself is never yielded.
        if self.len() > 0 {
            self.iter.next_back()
        } else {
            None
        }
    }

    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Fold from the back over exactly `len()` elements, so the skipped
        // prefix is never touched: the countdown `n` breaks the inner fold
        // as soon as the last non-skipped element has been folded.
        let mut n = self.len();
        if n == 0 {
            Try::from_ok(init)
        } else {
            self.iter.try_rfold(init, move |acc, x| {
                n -= 1;
                let r = fold(acc, x);
                if n == 0 { LoopState::Break(r) }
                else { LoopState::from_try(r) }
            }).into_try()
        }
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// After the one-time skip, `Skip` forwards every call to its inner
// iterator, so it is fused whenever `I` is.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that only iterates over the first `n` iterations of `iter`.
///
/// This `struct` is created by the [`take`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`take`]: trait.Iterator.html#method.take
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Take<I> {
    iter: I,
    // Remaining budget: how many more elements may still be yielded.
    n: usize
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Take<I> where I: Iterator{
    type Item = <I as Iterator>::Item;

    #[inline]
    fn next(&mut self) -> Option<<I as Iterator>::Item> {
        // Decrement the budget before delegating; once it hits zero the
        // inner iterator is no longer consulted.
        if self.n != 0 {
            self.n -= 1;
            self.iter.next()
        } else {
            None
        }
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<I::Item> {
        if self.n > n {
            // The requested index lies within the budget: n+1 elements are
            // consumed, so shrink the budget accordingly and delegate.
            self.n -= n + 1;
            self.iter.nth(n)
        } else {
            if self.n > 0 {
                // Index is past the budget: still consume the remaining
                // `self.n` elements from the inner iterator (matching what
                // repeated `next()` calls would have done), then report
                // exhaustion.
                self.iter.nth(self.n - 1);
                self.n = 0;
            }
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (lower, upper) = self.iter.size_hint();

        // Both bounds are clamped to the remaining budget `n`.
        let lower = cmp::min(lower, self.n);

        let upper = match upper {
            Some(x) if x < self.n => Some(x),
            _ => Some(self.n)
        };

        (lower, upper)
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        if self.n == 0 {
            Try::from_ok(init)
        } else {
            // Forward to the inner `try_fold`, breaking out of it once the
            // budget reaches zero; `from_try` propagates any short-circuit
            // from the caller's `fold`.
            let n = &mut self.n;
            self.iter.try_fold(init, move |acc, x| {
                *n -= 1;
                let r = fold(acc, x);
                if *n == 0 { LoopState::Break(r) }
                else { LoopState::from_try(r) }
            }).into_try()
        }
    }
}
|
|
|
|
|
|
|
|
// `Take`'s `size_hint` clamps both bounds to the remaining budget `n`, so
// the reported length stays exact whenever the inner iterator's is.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Once the budget is exhausted, `Take` returns `None` without touching `I`;
// before that it mirrors `I`, so fusedness carries over.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Take<I> where I: FusedIterator {}
|
2016-04-18 13:08:27 -05:00
|
|
|
|
|
|
|
/// An iterator to maintain state while iterating another iterator.
///
/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`scan`]: trait.Iterator.html#method.scan
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Scan<I, St, F> {
    iter: I,
    // The scan closure; receives `&mut state` and each inner element.
    f: F,
    // Mutable state threaded through every invocation of `f`.
    state: St,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, St: fmt::Debug, F> fmt::Debug for Scan<I, St, F> {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("Scan")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.field("state", &self.state)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I, St, F> Iterator for Scan<I, St, F> where
    I: Iterator,
    F: FnMut(&mut St, I::Item) -> Option<B>,
{
    type Item = B;

    #[inline]
    fn next(&mut self) -> Option<B> {
        // A `None` from the scan closure ends iteration early, even if the
        // inner iterator still has elements.
        self.iter.next().and_then(|a| (self.f)(&mut self.state, a))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (_, upper) = self.iter.size_hint();
        (0, upper) // can't know a lower bound, due to the scan function
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Forward to the inner `try_fold`; `LoopState` distinguishes the
        // scan closure's `None` (a normal early finish, yielding the
        // accumulator) from a short-circuit of the caller's `fold`.
        let state = &mut self.state;
        let f = &mut self.f;
        self.iter.try_fold(init, move |acc, x| {
            match f(state, x) {
                None => LoopState::Break(Try::from_ok(acc)),
                Some(x) => LoopState::from_try(fold(acc, x)),
            }
        }).into_try()
    }
}
|
|
|
|
|
|
|
|
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
///
/// This `struct` is created by the [`flat_map`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`flat_map`]: trait.Iterator.html#method.flat_map
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct FlatMap<I, U: IntoIterator, F> {
    iter: I,
    // Mapping closure producing one `U` per element of `iter`.
    f: F,
    // Partially-consumed sub-iterator being drained from the front.
    frontiter: Option<U::IntoIter>,
    // Partially-consumed sub-iterator being drained from the back
    // (used by double-ended iteration).
    backiter: Option<U::IntoIter>,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
|
|
|
|
impl<I: fmt::Debug, U: IntoIterator, F> fmt::Debug for FlatMap<I, U, F>
|
|
|
|
where U::IntoIter: fmt::Debug
|
|
|
|
{
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
f.debug_struct("FlatMap")
|
|
|
|
.field("iter", &self.iter)
|
|
|
|
.field("frontiter", &self.frontiter)
|
|
|
|
.field("backiter", &self.backiter)
|
|
|
|
.finish()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
    where F: FnMut(I::Item) -> U,
{
    type Item = U::Item;

    #[inline]
    fn next(&mut self) -> Option<U::Item> {
        loop {
            // Drain the current front sub-iterator first.
            if let Some(ref mut inner) = self.frontiter {
                if let Some(x) = inner.by_ref().next() {
                    return Some(x)
                }
            }
            // Front sub-iterator exhausted: map the next outer element into
            // a fresh one, or fall back to `backiter` (populated by the
            // double-ended half) once the outer iterator is done.
            match self.iter.next().map(&mut self.f) {
                None => return self.backiter.as_mut().and_then(|it| it.next()),
                next => self.frontiter = next.map(IntoIterator::into_iter),
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint());
        let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| it.size_hint());
        let lo = flo.saturating_add(blo);
        // A finite upper bound is only possible when the outer iterator is
        // definitely empty; otherwise unmapped elements could contribute
        // arbitrarily many items.
        match (self.iter.size_hint(), fhi, bhi) {
            ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)),
            _ => (lo, None)
        }
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Fold the leftover front sub-iterator, then every mapped
        // sub-iterator, then the leftover back sub-iterator, in order. On a
        // short-circuit (`?`), the partially-consumed sub-iterator has been
        // stored back into `frontiter`, so iteration can resume later.
        if let Some(ref mut front) = self.frontiter {
            init = front.try_fold(init, &mut fold)?;
        }
        self.frontiter = None;

        {
            let f = &mut self.f;
            let frontiter = &mut self.frontiter;
            init = self.iter.try_fold(init, |acc, x| {
                let mut mid = f(x).into_iter();
                let r = mid.try_fold(acc, &mut fold);
                *frontiter = Some(mid);
                r
            })?;
        }
        self.frontiter = None;

        if let Some(ref mut back) = self.backiter {
            init = back.try_fold(init, &mut fold)?;
        }
        self.backiter = None;

        Try::from_ok(init)
    }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Consuming fold: chain the leftover front sub-iterator, the mapped
        // middle, and the leftover back sub-iterator, folding each in turn
        // via the sub-iterators' own (possibly specialized) `fold`s.
        self.frontiter.into_iter()
            .chain(self.iter.map(self.f).map(U::into_iter))
            .chain(self.backiter)
            .fold(init, |acc, iter| iter.fold(acc, &mut fold))
    }
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F> where
    F: FnMut(I::Item) -> U,
    U: IntoIterator,
    U::IntoIter: DoubleEndedIterator
{
    #[inline]
    fn next_back(&mut self) -> Option<U::Item> {
        // Mirror of `next`: drain the back sub-iterator from its back end;
        // when it runs dry, pull the next outer item from the back, map it
        // through `f`, and install it as the new back sub-iterator.
        loop {
            if let Some(ref mut inner) = self.backiter {
                if let Some(y) = inner.next_back() {
                    return Some(y)
                }
            }
            match self.iter.next_back().map(&mut self.f) {
                // Outer exhausted: finish off the front sub-iterator, also
                // from its back end.
                None => return self.frontiter.as_mut().and_then(|it| it.next_back()),
                next => self.backiter = next.map(IntoIterator::into_iter),
            }
        }
    }

    #[inline]
    fn try_rfold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        // Mirror of `try_fold`, walking from the back. A short-circuit via
        // `?` leaves `backiter` holding the unconsumed remainder so a later
        // call can resume.
        if let Some(ref mut back) = self.backiter {
            init = back.try_rfold(init, &mut fold)?;
        }
        self.backiter = None;

        {
            // Borrow the fields separately so the closure can use both.
            let f = &mut self.f;
            let backiter = &mut self.backiter;
            init = self.iter.try_rfold(init, |acc, x| {
                let mut mid = f(x).into_iter();
                let r = mid.try_rfold(acc, &mut fold);
                // Keep the sub-iterator so a short-circuit can resume later.
                *backiter = Some(mid);
                r
            })?;
        }
        // Reaching here means every back sub-iterator was fully consumed.
        self.backiter = None;

        if let Some(ref mut front) = self.frontiter {
            init = front.try_rfold(init, &mut fold)?;
        }
        self.frontiter = None;

        Try::from_ok(init)
    }

    #[inline]
    fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        // Consume the pieces in reverse order by calling `rfold` on the
        // same (front, mapped outer, back) chain used by `fold`.
        self.frontiter.into_iter()
            .chain(self.iter.map(self.f).map(U::into_iter))
            .chain(self.backiter)
            .rfold(init, |acc, iter| iter.rfold(acc, &mut fold))
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Marker impl: a `FlatMap` over a fused outer iterator is itself fused.
// (The exact `FusedIterator` contract is declared at the trait definition,
// not visible here.)
#[unstable(feature = "fused", issue = "35602")]
impl<I, U, F> FusedIterator for FlatMap<I, U, F>
    where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
/// An iterator that yields `None` forever after the underlying iterator
/// yields `None` once.
///
/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`fuse`]: trait.Iterator.html#method.fuse
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Fuse<I> {
    // The underlying iterator being fused.
    iter: I,
    // Latched as soon as `iter` returns `None`; once set, the adaptor
    // reports exhaustion without polling `iter` again.
    done: bool
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// Marker impl: `Fuse` is fused by construction — the `done` flag guarantees
// `next` keeps returning `None` after the first `None`.
#[unstable(feature = "fused", issue = "35602")]
impl<I> FusedIterator for Fuse<I> where I: Iterator {}
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Fuse<I> where I: Iterator {
    type Item = <I as Iterator>::Item;

    // All methods here are `default fn`s (specialization): they are
    // overridden by the more specific `impl<I: FusedIterator> Iterator
    // for Fuse<I>`, which skips the `done` bookkeeping entirely.

    #[inline]
    default fn next(&mut self) -> Option<<I as Iterator>::Item> {
        if self.done {
            None
        } else {
            let next = self.iter.next();
            // Latch exhaustion so `iter` is never polled again.
            self.done = next.is_none();
            next
        }
    }

    #[inline]
    default fn nth(&mut self, n: usize) -> Option<I::Item> {
        if self.done {
            None
        } else {
            let nth = self.iter.nth(n);
            self.done = nth.is_none();
            nth
        }
    }

    #[inline]
    default fn last(self) -> Option<I::Item> {
        // Consumes `self`, so no `done` update is needed on the way out.
        if self.done {
            None
        } else {
            self.iter.last()
        }
    }

    #[inline]
    default fn count(self) -> usize {
        if self.done {
            0
        } else {
            self.iter.count()
        }
    }

    #[inline]
    default fn size_hint(&self) -> (usize, Option<usize>) {
        if self.done {
            (0, Some(0))
        } else {
            self.iter.size_hint()
        }
    }

    #[inline]
    default fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        if self.done {
            Try::from_ok(init)
        } else {
            let acc = self.iter.try_fold(init, fold)?;
            // `?` returns early on a short-circuit, leaving `done` unset so
            // iteration can resume; reaching here means the inner iterator
            // ran to completion.
            self.done = true;
            Try::from_ok(acc)
        }
    }

    #[inline]
    default fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        if self.done {
            init
        } else {
            self.iter.fold(init, fold)
        }
    }
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> DoubleEndedIterator for Fuse<I> where I: DoubleEndedIterator {
    // `default fn`s (specialization); mirrors of the forward `Iterator`
    // methods, operating from the back end.

    #[inline]
    default fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
        if self.done {
            None
        } else {
            let next = self.iter.next_back();
            // Latch exhaustion so `iter` is never polled again.
            self.done = next.is_none();
            next
        }
    }

    #[inline]
    default fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        if self.done {
            Try::from_ok(init)
        } else {
            let acc = self.iter.try_rfold(init, fold)?;
            // Only mark done after the inner iterator completed without a
            // short-circuit (`?` returns early otherwise).
            self.done = true;
            Try::from_ok(acc)
        }
    }

    #[inline]
    default fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        if self.done {
            init
        } else {
            self.iter.rfold(init, fold)
        }
    }
}
|
|
|
|
|
2016-08-13 13:42:36 -05:00
|
|
|
// NOTE(review): raw indexed access is forwarded straight to the inner
// iterator without consulting `done`. Presumably the `TrustedRandomAccess`
// contract (declared elsewhere in this file) guarantees callers only pass
// in-bounds indices, making the fuse flag irrelevant here — confirm at the
// trait definition.
unsafe impl<I> TrustedRandomAccess for Fuse<I>
    where I: TrustedRandomAccess,
{
    unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
        self.iter.get_unchecked(i)
    }

    fn may_have_side_effect() -> bool {
        I::may_have_side_effect()
    }
}
|
|
|
|
|
|
|
|
// Specialized `Iterator` impl for fuse-wrapped iterators that already
// guarantee fused behavior (`I: FusedIterator`): the `done` flag
// bookkeeping is unnecessary, so every method delegates straight to the
// inner iterator, preserving its own specializations (e.g. `Chain`'s fold).
#[unstable(feature = "fused", issue = "35602")]
impl<I> Iterator for Fuse<I> where I: FusedIterator {
    #[inline]
    fn next(&mut self) -> Option<<I as Iterator>::Item> {
        self.iter.next()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<I::Item> {
        self.iter.nth(n)
    }

    #[inline]
    fn last(self) -> Option<I::Item> {
        self.iter.last()
    }

    #[inline]
    fn count(self) -> usize {
        self.iter.count()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    // Forwarded so short-circuiting folds keep the inner iterator's
    // specialized implementation.
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        self.iter.try_fold(init, fold)
    }

    // Unlike the default `Fuse` path, no `done` check is needed here.
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, fold)
    }
}
|
|
|
|
|
|
|
|
// Specialized double-ended impl for `I: FusedIterator`: since the inner
// iterator is guaranteed to stay exhausted once it returns `None`, the
// `done` flag can be ignored and everything delegates directly.
#[unstable(feature = "fused", reason = "recently added", issue = "35602")]
impl<I> DoubleEndedIterator for Fuse<I>
    where I: DoubleEndedIterator + FusedIterator
{
    #[inline]
    fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
        self.iter.next_back()
    }

    // Forwarded so short-circuiting reverse folds keep the inner
    // iterator's specialized implementation.
    #[inline]
    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
        Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
    {
        self.iter.try_rfold(init, fold)
    }

    // Overrides the flag-checking `default fn rfold` defined above.
    #[inline]
    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
        where Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, fold)
    }
}
|
|
|
|
|
|
|
|
|
2016-04-18 13:08:27 -05:00
|
|
|
// Fusing never changes how many elements remain, so the exact-size
// queries are answered by the inner iterator.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Fuse<I> where I: ExactSizeIterator {
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
|
2016-04-18 13:08:27 -05:00
|
|
|
|
|
|
|
/// An iterator that calls a function with a reference to each element before
/// yielding it.
///
/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`inspect`]: trait.Iterator.html#method.inspect
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Inspect<I, F> {
    // The wrapped iterator that actually produces the elements.
    iter: I,
    // Callback invoked with `&item` before each element is yielded.
    f: F,
}
|
|
|
|
|
|
|
|
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, F> fmt::Debug for Inspect<I, F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only `iter` is printed: `F` carries no `Debug` bound (closures
        // generally aren't debuggable), so the callback field is omitted.
        f.debug_struct("Inspect")
            .field("iter", &self.iter)
            .finish()
    }
}
|
|
|
|
|
|
|
|
impl<I: Iterator, F> Inspect<I, F> where F: FnMut(&I::Item) {
|
|
|
|
#[inline]
|
|
|
|
fn do_inspect(&mut self, elt: Option<I::Item>) -> Option<I::Item> {
|
|
|
|
if let Some(ref a) = elt {
|
|
|
|
(self.f)(a);
|
|
|
|
}
|
|
|
|
|
|
|
|
elt
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<I: Iterator, F> Iterator for Inspect<I, F> where F: FnMut(&I::Item) {
|
|
|
|
type Item = I::Item;
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn next(&mut self) -> Option<I::Item> {
|
|
|
|
let next = self.iter.next();
|
|
|
|
self.do_inspect(next)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
|
|
|
self.iter.size_hint()
|
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let f = &mut self.f;
|
|
|
|
self.iter.try_fold(init, move |acc, item| { f(&item); fold(acc, item) })
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut f = self.f;
|
|
|
|
self.iter.fold(init, move |acc, item| { f(&item); fold(acc, item) })
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
#[stable(feature = "rust1", since = "1.0.0")]
|
|
|
|
impl<I: DoubleEndedIterator, F> DoubleEndedIterator for Inspect<I, F>
|
|
|
|
where F: FnMut(&I::Item),
|
|
|
|
{
|
|
|
|
#[inline]
|
|
|
|
fn next_back(&mut self) -> Option<I::Item> {
|
|
|
|
let next = self.iter.next_back();
|
|
|
|
self.do_inspect(next)
|
|
|
|
}
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
|
2017-10-23 00:47:27 -05:00
|
|
|
#[inline]
|
|
|
|
fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R where
|
|
|
|
Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
|
|
|
|
{
|
|
|
|
let f = &mut self.f;
|
|
|
|
self.iter.try_rfold(init, move |acc, item| { f(&item); fold(acc, item) })
|
|
|
|
}
|
|
|
|
|
Add more custom folding to `core::iter` adaptors
Many of the iterator adaptors will perform faster folds if they forward
to their inner iterator's folds, especially for inner types like `Chain`
which are optimized too. The following types are newly specialized:
| Type | `fold` | `rfold` |
| ----------- | ------ | ------- |
| `Enumerate` | ✓ | ✓ |
| `Filter` | ✓ | ✓ |
| `FilterMap` | ✓ | ✓ |
| `FlatMap` | exists | ✓ |
| `Fuse` | ✓ | ✓ |
| `Inspect` | ✓ | ✓ |
| `Peekable` | ✓ | N/A¹ |
| `Skip` | ✓ | N/A² |
| `SkipWhile` | ✓ | N/A¹ |
¹ not a `DoubleEndedIterator`
² `Skip::next_back` doesn't pull skipped items at all, but this couldn't
be avoided if `Skip::rfold` were to call its inner iterator's `rfold`.
Benchmarks
----------
In the following results, plain `_sum` computes the sum of a million
integers -- note that `sum()` is implemented with `fold()`. The
`_ref_sum` variants do the same on a `by_ref()` iterator, which is
limited to calling `next()` one by one, without specialized `fold`.
The `chain` variants perform the same tests on two iterators chained
together, to show a greater benefit of forwarding `fold` internally.
test iter::bench_enumerate_chain_ref_sum ... bench: 2,216,264 ns/iter (+/- 29,228)
test iter::bench_enumerate_chain_sum ... bench: 922,380 ns/iter (+/- 2,676)
test iter::bench_enumerate_ref_sum ... bench: 476,094 ns/iter (+/- 7,110)
test iter::bench_enumerate_sum ... bench: 476,438 ns/iter (+/- 3,334)
test iter::bench_filter_chain_ref_sum ... bench: 2,266,095 ns/iter (+/- 6,051)
test iter::bench_filter_chain_sum ... bench: 745,594 ns/iter (+/- 2,013)
test iter::bench_filter_ref_sum ... bench: 889,696 ns/iter (+/- 1,188)
test iter::bench_filter_sum ... bench: 667,325 ns/iter (+/- 1,894)
test iter::bench_filter_map_chain_ref_sum ... bench: 2,259,195 ns/iter (+/- 353,440)
test iter::bench_filter_map_chain_sum ... bench: 1,223,280 ns/iter (+/- 1,972)
test iter::bench_filter_map_ref_sum ... bench: 611,607 ns/iter (+/- 2,507)
test iter::bench_filter_map_sum ... bench: 611,610 ns/iter (+/- 472)
test iter::bench_fuse_chain_ref_sum ... bench: 2,246,106 ns/iter (+/- 22,395)
test iter::bench_fuse_chain_sum ... bench: 634,887 ns/iter (+/- 1,341)
test iter::bench_fuse_ref_sum ... bench: 444,816 ns/iter (+/- 1,748)
test iter::bench_fuse_sum ... bench: 316,954 ns/iter (+/- 2,616)
test iter::bench_inspect_chain_ref_sum ... bench: 2,245,431 ns/iter (+/- 21,371)
test iter::bench_inspect_chain_sum ... bench: 631,645 ns/iter (+/- 4,928)
test iter::bench_inspect_ref_sum ... bench: 317,437 ns/iter (+/- 702)
test iter::bench_inspect_sum ... bench: 315,942 ns/iter (+/- 4,320)
test iter::bench_peekable_chain_ref_sum ... bench: 2,243,585 ns/iter (+/- 12,186)
test iter::bench_peekable_chain_sum ... bench: 634,848 ns/iter (+/- 1,712)
test iter::bench_peekable_ref_sum ... bench: 444,808 ns/iter (+/- 480)
test iter::bench_peekable_sum ... bench: 317,133 ns/iter (+/- 3,309)
test iter::bench_skip_chain_ref_sum ... bench: 1,778,734 ns/iter (+/- 2,198)
test iter::bench_skip_chain_sum ... bench: 761,850 ns/iter (+/- 1,645)
test iter::bench_skip_ref_sum ... bench: 478,207 ns/iter (+/- 119,252)
test iter::bench_skip_sum ... bench: 315,614 ns/iter (+/- 3,054)
test iter::bench_skip_while_chain_ref_sum ... bench: 2,486,370 ns/iter (+/- 4,845)
test iter::bench_skip_while_chain_sum ... bench: 633,915 ns/iter (+/- 5,892)
test iter::bench_skip_while_ref_sum ... bench: 666,926 ns/iter (+/- 804)
test iter::bench_skip_while_sum ... bench: 444,405 ns/iter (+/- 571)
2017-09-25 22:53:08 -05:00
|
|
|
#[inline]
|
|
|
|
fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
|
|
|
|
where Fold: FnMut(Acc, Self::Item) -> Acc,
|
|
|
|
{
|
|
|
|
let mut f = self.f;
|
|
|
|
self.iter.rfold(init, move |acc, item| { f(&item); fold(acc, item) })
|
|
|
|
}
|
2016-04-18 13:08:27 -05:00
|
|
|
}
|
2016-04-18 16:44:02 -05:00
|
|
|
|
|
|
|
// Inspection is element-count-neutral, so exact-size queries delegate to
// the inner iterator.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator, F> ExactSizeIterator for Inspect<I, F>
    where F: FnMut(&I::Item)
{
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
|
2016-08-13 13:42:36 -05:00
|
|
|
|
|
|
|
// Marker impl: `Inspect` yields `None` forever once its inner fused
// iterator does, since it merely forwards `next`/`next_back`.
#[unstable(feature = "fused", issue = "35602")]
impl<I: FusedIterator, F> FusedIterator for Inspect<I, F>
    where F: FnMut(&I::Item) {}
|