Auto merge of #97159 - JohnTitor:rollup-ibl51vw, r=JohnTitor
Rollup of 6 pull requests

Successful merges:

 - #96866 (Switch CI bucket uploads to intelligent tiering)
 - #97062 (Couple of refactorings to cg_ssa::base::codegen_crate)
 - #97127 (Revert "Auto merge of #96441 - ChrisDenton:sync-pipes, r=m-ou-se")
 - #97131 (Improve println! documentation)
 - #97139 (Move some settings DOM generation out of JS)
 - #97152 (Update cargo)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
commit e6327bc8b8
@@ -15,8 +15,9 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_data_structures::sync::par_iter;
#[cfg(parallel_compiler)]
use rustc_data_structures::sync::{par_iter, ParallelIterator};
use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;

@@ -607,6 +608,14 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
second_half.iter().rev().interleave(first_half).copied().collect()
};
// Calculate the CGU reuse
let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
});
let mut total_codegen_time = Duration::new(0, 0);
let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
// The non-parallel compiler can only translate codegen units to LLVM IR
// on a single thread, leading to a staircase effect where the N LLVM
// threads have to wait on the single codegen threads to generate work

@@ -617,8 +626,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
// This likely is a temporary measure. Once we don't have to support the
// non-parallel compiler anymore, we can compile CGUs end-to-end in
// parallel and get rid of the complicated scheduling logic.
#[cfg(parallel_compiler)]
let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
tcx.sess.time("compile_first_CGU_batch", || {
// Try to find one CGU to compile per thread.
let cgus: Vec<_> = cgu_reuse

@@ -638,48 +646,31 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
})
.collect();
(pre_compiled_cgus, start_time.elapsed())
total_codegen_time += start_time.elapsed();
pre_compiled_cgus
})
} else {
FxHashMap::default()
};
#[cfg(not(parallel_compiler))]
let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));
let mut cgu_reuse = Vec::new();
let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
let mut total_codegen_time = Duration::new(0, 0);
let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
for (i, cgu) in codegen_units.iter().enumerate() {
ongoing_codegen.wait_for_signal_to_codegen_item();
ongoing_codegen.check_for_errors(tcx.sess);
// Do some setup work in the first iteration
if pre_compiled_cgus.is_none() {
// Calculate the CGU reuse
cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
});
// Pre compile some CGUs
let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
pre_compiled_cgus = Some(compiled_cgus);
total_codegen_time += codegen_time;
}
let cgu_reuse = cgu_reuse[i];
tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
CguReuse::No => {
let (module, cost) =
if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
cgu
} else {
let start_time = Instant::now();
let module = backend.compile_codegen_unit(tcx, cgu.name());
total_codegen_time += start_time.elapsed();
module
};
let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
cgu
} else {
let start_time = Instant::now();
let module = backend.compile_codegen_unit(tcx, cgu.name());
total_codegen_time += start_time.elapsed();
module
};
// This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
// guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
// compilation hang on post-monomorphization errors.
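The hunks above reorganize how `codegen_crate` computes CGU reuse and pre-compiles a first batch of codegen units before the main loop. One detail worth a closer look is the ordering line `second_half.iter().rev().interleave(first_half)`: the sorted units are split in half and the two halves are interleaved. A minimal sketch of that interleaving with plain standard-library iterators follows; the element type `T` and the function name are illustrative stand-ins, not compiler types.

```rust
// A minimal model of `second_half.iter().rev().interleave(first_half)`:
// alternate items drawn from the reversed second half and the first half,
// then keep draining whichever side still has items left.
fn interleave_halves<T: Clone>(first_half: &[T], second_half: &[T]) -> Vec<T> {
    let mut a = second_half.iter().rev();
    let mut b = first_half.iter();
    let mut out = Vec::with_capacity(first_half.len() + second_half.len());
    loop {
        match (a.next(), b.next()) {
            (None, None) => break,
            (x, y) => {
                out.extend(x.cloned());
                out.extend(y.cloned());
            }
        }
    }
    out
}
```

The intent, as far as the surrounding code suggests, is to avoid clustering all the expensive codegen units at one end of the work queue handed to the LLVM threads.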
@@ -72,7 +72,7 @@ macro_rules! print {
/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
///
/// Use the [`format!`] syntax to write data to the standard output.
/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
/// See [`std::fmt`] for more information.
///
/// Use `println!` only for the primary output of your program. Use
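As a quick illustration of the documentation change above (`println!` accepting the same formatting syntax as `format!`, and terminating output with a bare line feed), here is a small self-contained example:

```rust
fn main() {
    let name = "world";

    // `println!` accepts the same formatting syntax as `format!`.
    println!("Hello, {name}!");
    println!("{} + {} = {}", 1, 2, 1 + 2);

    // `print!` writes without a newline; `println!()` appends only `\n`
    // (a LINE FEED), never `\r\n`, on every platform.
    print!("no trailing newline yet");
    println!();
}
```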
@@ -204,19 +204,6 @@ pub(crate) fn duplicate(
})?;
unsafe { Ok(Self::from_raw_handle(ret)) }
}
/// Allow child processes to inherit the handle.
#[cfg(not(target_vendor = "uwp"))]
pub(crate) fn set_inheritable(&self) -> io::Result<()> {
cvt(unsafe {
c::SetHandleInformation(
self.as_raw_handle(),
c::HANDLE_FLAG_INHERIT,
c::HANDLE_FLAG_INHERIT,
)
})?;
Ok(())
}
}
impl TryFrom<HandleOrInvalid> for OwnedHandle {
@@ -1026,12 +1026,6 @@ pub fn WaitForMultipleObjects(
bWaitAll: BOOL,
dwMilliseconds: DWORD,
) -> DWORD;
pub fn CreatePipe(
hReadPipe: *mut HANDLE,
hWritePipe: *mut HANDLE,
lpPipeAttributes: *const SECURITY_ATTRIBUTES,
nSize: DWORD,
) -> BOOL;
pub fn CreateNamedPipeW(
lpName: LPCWSTR,
dwOpenMode: DWORD,
@@ -221,11 +221,6 @@ pub fn duplicate(
Ok(Self(self.0.duplicate(access, inherit, options)?))
}
#[cfg(not(target_vendor = "uwp"))]
pub(crate) fn set_inheritable(&self) -> io::Result<()> {
self.0.set_inheritable()
}
/// Performs a synchronous read.
///
/// If the handle is opened for asynchronous I/O then this aborts the process.
@@ -18,20 +18,13 @@
// Anonymous pipes
////////////////////////////////////////////////////////////////////////////////
// A 64kb pipe capacity is the same as a typical Linux default.
const PIPE_BUFFER_CAPACITY: u32 = 64 * 1024;
pub enum AnonPipe {
Sync(Handle),
Async(Handle),
pub struct AnonPipe {
inner: Handle,
}
impl IntoInner<Handle> for AnonPipe {
fn into_inner(self) -> Handle {
match self {
Self::Sync(handle) => handle,
Self::Async(handle) => handle,
}
self.inner
}
}

@@ -39,46 +32,6 @@ pub struct Pipes {
pub ours: AnonPipe,
pub theirs: AnonPipe,
}
impl Pipes {
/// Create a new pair of pipes where both pipes are synchronous.
///
/// These must not be used asynchronously.
pub fn new_synchronous(
ours_readable: bool,
their_handle_inheritable: bool,
) -> io::Result<Self> {
unsafe {
// If `CreatePipe` succeeds, these will be our pipes.
let mut read = ptr::null_mut();
let mut write = ptr::null_mut();
if c::CreatePipe(&mut read, &mut write, ptr::null(), PIPE_BUFFER_CAPACITY) == 0 {
Err(io::Error::last_os_error())
} else {
let (ours, theirs) = if ours_readable { (read, write) } else { (write, read) };
let ours = Handle::from_raw_handle(ours);
#[cfg(not(target_vendor = "uwp"))]
let theirs = Handle::from_raw_handle(theirs);
#[cfg(target_vendor = "uwp")]
let mut theirs = Handle::from_raw_handle(theirs);
if their_handle_inheritable {
#[cfg(not(target_vendor = "uwp"))]
{
theirs.set_inheritable()?;
}
#[cfg(target_vendor = "uwp")]
{
theirs = theirs.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)?;
}
}
Ok(Pipes { ours: AnonPipe::Sync(ours), theirs: AnonPipe::Sync(theirs) })
}
}
}
}
/// Although this looks similar to `anon_pipe` in the Unix module it's actually
/// subtly different. Here we'll return two pipes in the `Pipes` return value,

@@ -100,6 +53,9 @@ pub fn new_synchronous(
/// with `OVERLAPPED` instances, but also works out ok if it's only ever used
/// once at a time (which we do indeed guarantee).
pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Result<Pipes> {
// A 64kb pipe capacity is the same as a typical Linux default.
const PIPE_BUFFER_CAPACITY: u32 = 64 * 1024;
// Note that we specifically do *not* use `CreatePipe` here because
// unfortunately the anonymous pipes returned do not support overlapped
// operations. Instead, we create a "hopefully unique" name and create a

@@ -200,9 +156,12 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res
};
opts.security_attributes(&mut sa);
let theirs = File::open(Path::new(&name), &opts)?;
let theirs = AnonPipe::Sync(theirs.into_inner());
let theirs = AnonPipe { inner: theirs.into_inner() };
Ok(Pipes { ours: AnonPipe::Async(ours), theirs })
Ok(Pipes {
ours: AnonPipe { inner: ours },
theirs: AnonPipe { inner: theirs.into_inner() },
})
}
}
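The comment in the `anon_pipe` hunk explains why it does not use `CreatePipe`: the anonymous pipes that call returns do not support overlapped (asynchronous) I/O, so the implementation instead creates a named pipe under a "hopefully unique" name and opens the other end as a file. Purely as a hedged illustration of such a naming scheme, and not the real prefix or helper used in this module:

```rust
// Illustrative only: build a per-process, per-call pipe name in the Windows
// named-pipe namespace so two processes racing to create pipes don't collide.
// The prefix and the `random` parameter are hypothetical stand-ins.
fn illustrative_pipe_name(random: u64) -> String {
    format!(r"\\.\pipe\__illustrative_anon_pipe__.{}.{}", std::process::id(), random)
}
```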
@@ -212,12 +171,12 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res
/// This is achieved by creating a new set of pipes and spawning a thread that
/// relays messages between the source and the synchronous pipe.
pub fn spawn_pipe_relay(
source: &Handle,
source: &AnonPipe,
ours_readable: bool,
their_handle_inheritable: bool,
) -> io::Result<AnonPipe> {
// We need this handle to live for the lifetime of the thread spawned below.
let source = AnonPipe::Async(source.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)?);
let source = source.duplicate()?;
// create a new pair of anon pipes.
let Pipes { theirs, ours } = anon_pipe(ours_readable, their_handle_inheritable)?;
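Per its doc comment, `spawn_pipe_relay` duplicates the source pipe and spawns a thread that relays messages between it and a freshly created pipe pair. A generic, hedged sketch of that relay pattern using only standard `Read`/`Write` types (none of the Windows-specific handle types appear here, and the names are illustrative):

```rust
use std::io::{self, Read, Write};
use std::thread;

// Generic relay: copy everything from `source` into `sink` on a background
// thread, so a caller can hand the other end of `sink` to a child process
// while this thread keeps draining `source`.
fn spawn_relay<R, W>(mut source: R, mut sink: W) -> thread::JoinHandle<io::Result<u64>>
where
    R: Read + Send + 'static,
    W: Write + Send + 'static,
{
    thread::spawn(move || io::copy(&mut source, &mut sink))
}
```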
@@ -268,24 +227,19 @@ fn random_number() -> usize {
impl AnonPipe {
pub fn handle(&self) -> &Handle {
match self {
Self::Async(ref handle) => handle,
Self::Sync(ref handle) => handle,
}
&self.inner
}
pub fn into_handle(self) -> Handle {
self.into_inner()
self.inner
}
fn duplicate(&self) -> io::Result<Self> {
self.inner.duplicate(0, false, c::DUPLICATE_SAME_ACCESS).map(|inner| AnonPipe { inner })
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
let result = unsafe {
let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
match self {
Self::Sync(ref handle) => handle.read(buf),
Self::Async(_) => {
self.alertable_io_internal(c::ReadFileEx, buf.as_mut_ptr() as _, len)
}
}
self.alertable_io_internal(c::ReadFileEx, buf.as_mut_ptr() as _, len)
};
match result {

@@ -299,33 +253,28 @@ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
io::default_read_vectored(|buf| self.read(buf), bufs)
self.inner.read_vectored(bufs)
}
#[inline]
pub fn is_read_vectored(&self) -> bool {
false
self.inner.is_read_vectored()
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
match self {
Self::Sync(ref handle) => handle.write(buf),
Self::Async(_) => {
self.alertable_io_internal(c::WriteFileEx, buf.as_ptr() as _, len)
}
}
self.alertable_io_internal(c::WriteFileEx, buf.as_ptr() as _, len)
}
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
io::default_write_vectored(|buf| self.write(buf), bufs)
self.inner.write_vectored(bufs)
}
#[inline]
pub fn is_write_vectored(&self) -> bool {
false
self.inner.is_write_vectored()
}
/// Synchronizes asynchronous reads or writes using our anonymous pipe.

@@ -397,7 +346,7 @@ struct AsyncResult {
// Asynchronous read of the pipe.
// If successful, `callback` will be called once it completes.
let result = io(self.handle().as_handle(), buf, len, &mut overlapped, callback);
let result = io(self.inner.as_handle(), buf, len, &mut overlapped, callback);
if result == c::FALSE {
// We can return here because the call failed.
// After this we must not return until the I/O completes.
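One side of the `read_vectored`/`write_vectored` hunks forwards to the underlying `Handle`, while the other falls back to the non-vectored path via `io::default_read_vectored`/`io::default_write_vectored`. As I understand it, that fallback amounts to a single plain read into the first non-empty buffer; a hedged standalone equivalent, with hypothetical names and no claim to match the internal helper exactly:

```rust
use std::io::{self, IoSliceMut, Read};

// Hedged sketch of a non-vectored fallback: perform one plain `read` into the
// first non-empty buffer and report how many bytes landed there.
fn read_vectored_fallback<R: Read>(
    reader: &mut R,
    bufs: &mut [IoSliceMut<'_>],
) -> io::Result<usize> {
    match bufs.iter_mut().find(|b| !b.is_empty()) {
        Some(buf) => reader.read(buf),
        None => Ok(0),
    }
}
```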
@@ -23,7 +23,7 @@
use crate::sys::fs::{File, OpenOptions};
use crate::sys::handle::Handle;
use crate::sys::path;
use crate::sys::pipe::{self, AnonPipe, Pipes};
use crate::sys::pipe::{self, AnonPipe};
use crate::sys::stdio;
use crate::sys_common::mutex::StaticMutex;
use crate::sys_common::process::{CommandEnv, CommandEnvs};

@@ -172,7 +172,7 @@ pub enum Stdio {
Inherit,
Null,
MakePipe,
AsyncPipe(Handle),
Pipe(AnonPipe),
Handle(Handle),
}
@@ -527,33 +527,13 @@ fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Resu
},
Stdio::MakePipe => {
// Handles that are passed to a child process must be synchronous
// because they will be read synchronously (see #95759).
// Therefore we prefer to make both ends of a pipe synchronous
// just in case our end of the pipe is passed to another process.
//
// However, we may need to read from both the child's stdout and
// stderr simultaneously when waiting for output. This requires
// async reads so as to avoid blocking either pipe.
//
// The solution used here is to make handles synchronous
// except for our side of the stdout and stderr pipes.
// If our side of those pipes does end up being given to another
// process then we use a "pipe relay" to synchronize access
// (see `Stdio::AsyncPipe` below).
let pipes = if stdio_id == c::STD_INPUT_HANDLE {
// For stdin both sides of the pipe are synchronous.
Pipes::new_synchronous(false, true)?
} else {
// For stdout/stderr our side of the pipe is async and their side is synchronous.
pipe::anon_pipe(true, true)?
};
let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
let pipes = pipe::anon_pipe(ours_readable, true)?;
*pipe = Some(pipes.ours);
Ok(pipes.theirs.into_handle())
}
Stdio::AsyncPipe(ref source) => {
// We need to synchronize asynchronous pipes by using a pipe relay.
Stdio::Pipe(ref source) => {
let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
pipe::spawn_pipe_relay(source, ours_readable, true).map(AnonPipe::into_handle)
}

@@ -582,13 +562,7 @@ fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Resu
impl From<AnonPipe> for Stdio {
fn from(pipe: AnonPipe) -> Stdio {
// Note that it's very important we don't give async handles to child processes.
// Therefore if the pipe is asynchronous we must have a way to turn it synchronous.
// See #95759.
match pipe {
AnonPipe::Sync(handle) => Stdio::Handle(handle),
AnonPipe::Async(handle) => Stdio::AsyncPipe(handle),
}
Stdio::Pipe(pipe)
}
}
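The long comment in the `Stdio::MakePipe` hunk is about why handles given to a child must be synchronous while the parent still needs to drain the child's stdout and stderr concurrently. In portable std code, the same "read both pipes at once" requirement shows up whenever a child writes to both streams; a minimal, hedged illustration using threads (the command shown is an arbitrary stand-in):

```rust
use std::io::Read;
use std::process::{Command, Stdio};
use std::thread;

fn main() -> std::io::Result<()> {
    // Arbitrary example command; anything that writes to both streams works.
    let mut child = Command::new("cargo")
        .arg("--version")
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;

    let mut stdout_pipe = child.stdout.take().expect("stdout was piped");
    let mut stderr_pipe = child.stderr.take().expect("stderr was piped");

    // Drain both pipes concurrently so neither one fills up and blocks the child.
    let stderr_thread = thread::spawn(move || {
        let mut err = String::new();
        stderr_pipe.read_to_string(&mut err).map(|_| err)
    });
    let mut out = String::new();
    stdout_pipe.read_to_string(&mut out)?;
    let err = stderr_thread.join().expect("stderr reader panicked")?;

    let status = child.wait()?;
    println!("status: {status}\nstdout: {out}stderr: {err}");
    Ok(())
}
```

This is the user-facing counterpart of the constraint the comment describes: the parent must be able to service both pipes at once, however the platform implements them underneath.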
@@ -1 +1 @@
0.9.2
0.9.3
@@ -38,4 +38,5 @@ if [[ "${DEPLOY_ALT-0}" -eq "1" ]]; then
fi
deploy_url="s3://${DEPLOY_BUCKET}/${deploy_dir}/$(ciCommit)"
retry aws s3 cp --no-progress --recursive --acl public-read "${upload_dir}" "${deploy_url}"
retry aws s3 cp --storage-class INTELLIGENT_TIERING \
--no-progress --recursive --acl public-read "${upload_dir}" "${deploy_url}"
@@ -596,9 +596,19 @@ fn after_krate(&mut self) -> Result<(), Error> {
|buf: &mut Buffer| {
write!(
buf,
"<link rel=\"stylesheet\" type=\"text/css\" \
href=\"{root_path}settings{suffix}.css\">\
<script defer src=\"{root_path}settings{suffix}.js\"></script>",
"<div class=\"main-heading\">\
<h1 class=\"fqn\">\
<span class=\"in-band\">Rustdoc settings</span>\
</h1>\
<span class=\"out-of-band\">\
<a id=\"back\" href=\"javascript:void(0)\" onclick=\"history.back();\">\
Back\
</a>\
</span>\
</div>\
<link rel=\"stylesheet\" type=\"text/css\" \
href=\"{root_path}settings{suffix}.css\">\
<script defer src=\"{root_path}settings{suffix}.js\"></script>",
root_path = page.static_root_path.unwrap_or(""),
suffix = page.resource_suffix,
)
@@ -206,22 +206,8 @@
];
// Then we build the DOM.
let innerHTML = "";
let elementKind = "div";
if (isSettingsPage) {
elementKind = "section";
innerHTML = `<div class="main-heading">
<h1 class="fqn">
<span class="in-band">Rustdoc settings</span>
</h1>
<span class="out-of-band">
<a id="back" href="javascript:void(0)" onclick="history.back();">Back</a>
</span>
</div>`;
}
innerHTML += `<div class="settings">${buildSettingsPageSections(settings)}</div>`;
const elementKind = isSettingsPage ? "section" : "div";
const innerHTML = `<div class="settings">${buildSettingsPageSections(settings)}</div>`;
const el = document.createElement(elementKind);
el.id = "settings";
el.innerHTML = innerHTML;
@@ -1 +1 @@
Subproject commit 3f052d8eed98c6a24f8b332fb2e6e6249d12d8c1
Subproject commit a4c1cd0eb6b18082a7e693f5a665548fe1534be4