Auto merge of #14218 - Veykril:root-dedup, r=Veykril

Deduplicate source roots that have overlapping include paths

Fixes flycheck not working for the rustc workspace when using `linkedProjects`.

Commit 1d07c5bc25
--- a/crates/flycheck/src/lib.rs
+++ b/crates/flycheck/src/lib.rs
@@ -76,7 +76,7 @@ impl fmt::Display for FlycheckConfig {
 #[derive(Debug)]
 pub struct FlycheckHandle {
     // XXX: drop order is significant
-    sender: Sender<Restart>,
+    sender: Sender<StateChange>,
     _thread: jod_thread::JoinHandle,
     id: usize,
 }
@@ -89,7 +89,7 @@ impl FlycheckHandle {
         workspace_root: AbsPathBuf,
     ) -> FlycheckHandle {
         let actor = FlycheckActor::new(id, sender, config, workspace_root);
-        let (sender, receiver) = unbounded::<Restart>();
+        let (sender, receiver) = unbounded::<StateChange>();
         let thread = jod_thread::Builder::new()
             .name("Flycheck".to_owned())
             .spawn(move || actor.run(receiver))
@@ -99,12 +99,12 @@ impl FlycheckHandle {
 
     /// Schedule a re-start of the cargo check worker.
     pub fn restart(&self) {
-        self.sender.send(Restart::Yes).unwrap();
+        self.sender.send(StateChange::Restart).unwrap();
     }
 
     /// Stop this cargo check worker.
     pub fn cancel(&self) {
-        self.sender.send(Restart::No).unwrap();
+        self.sender.send(StateChange::Cancel).unwrap();
     }
 
     pub fn id(&self) -> usize {
@@ -149,9 +149,9 @@ pub enum Progress {
     DidFailToRestart(String),
 }
 
-enum Restart {
-    Yes,
-    No,
+enum StateChange {
+    Restart,
+    Cancel,
 }
 
 /// A [`FlycheckActor`] is a single check instance of a workspace.
@@ -172,7 +172,7 @@ struct FlycheckActor {
 }
 
 enum Event {
-    Restart(Restart),
+    RequestStateChange(StateChange),
     CheckEvent(Option<CargoMessage>),
 }
 
@@ -191,30 +191,31 @@ impl FlycheckActor {
         self.send(Message::Progress { id: self.id, progress });
     }
 
-    fn next_event(&self, inbox: &Receiver<Restart>) -> Option<Event> {
+    fn next_event(&self, inbox: &Receiver<StateChange>) -> Option<Event> {
         let check_chan = self.cargo_handle.as_ref().map(|cargo| &cargo.receiver);
         if let Ok(msg) = inbox.try_recv() {
             // give restarts a preference so check outputs don't block a restart or stop
-            return Some(Event::Restart(msg));
+            return Some(Event::RequestStateChange(msg));
        }
         select! {
-            recv(inbox) -> msg => msg.ok().map(Event::Restart),
+            recv(inbox) -> msg => msg.ok().map(Event::RequestStateChange),
             recv(check_chan.unwrap_or(&never())) -> msg => Some(Event::CheckEvent(msg.ok())),
         }
     }
 
-    fn run(mut self, inbox: Receiver<Restart>) {
+    fn run(mut self, inbox: Receiver<StateChange>) {
         'event: while let Some(event) = self.next_event(&inbox) {
             match event {
-                Event::Restart(Restart::No) => {
+                Event::RequestStateChange(StateChange::Cancel) => {
+                    tracing::debug!(flycheck_id = self.id, "flycheck cancelled");
                     self.cancel_check_process();
                 }
-                Event::Restart(Restart::Yes) => {
+                Event::RequestStateChange(StateChange::Restart) => {
                     // Cancel the previously spawned process
                     self.cancel_check_process();
                     while let Ok(restart) = inbox.recv_timeout(Duration::from_millis(50)) {
                         // restart chained with a stop, so just cancel
-                        if let Restart::No = restart {
+                        if let StateChange::Cancel = restart {
                             continue 'event;
                         }
                     }
@@ -255,10 +256,20 @@ impl FlycheckActor {
                 }
                 Event::CheckEvent(Some(message)) => match message {
                     CargoMessage::CompilerArtifact(msg) => {
+                        tracing::trace!(
+                            flycheck_id = self.id,
+                            artifact = msg.target.name,
+                            "artifact received"
+                        );
                         self.report_progress(Progress::DidCheckCrate(msg.target.name));
                     }
 
                     CargoMessage::Diagnostic(msg) => {
+                        tracing::trace!(
+                            flycheck_id = self.id,
+                            message = msg.message,
+                            "diagnostic received"
+                        );
                         self.send(Message::AddDiagnostic {
                             id: self.id,
                             workspace_root: self.root.clone(),
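Beyond the `Restart` → `StateChange` rename, the `next_event` hunk above preserves a deliberate scheduling choice: the control inbox is drained with `try_recv` before blocking in `select!`, so a pending restart or cancel is never starved by queued check output. The following is a minimal standalone sketch of that pattern, not the actual flycheck code; it assumes the `crossbeam-channel` crate and uses placeholder message types:

    use crossbeam_channel::{never, select, unbounded, Receiver, Sender};

    enum StateChange {
        Restart,
        Cancel,
    }

    enum Event {
        RequestStateChange(StateChange),
        // `None` means the data channel was disconnected.
        CheckEvent(Option<String>),
    }

    fn next_event(inbox: &Receiver<StateChange>, check: Option<&Receiver<String>>) -> Option<Event> {
        // Give state changes preference so check output can't starve a restart/cancel.
        if let Ok(msg) = inbox.try_recv() {
            return Some(Event::RequestStateChange(msg));
        }
        select! {
            recv(inbox) -> msg => msg.ok().map(Event::RequestStateChange),
            // With no check process running, block on a channel that never fires.
            recv(check.unwrap_or(&never())) -> msg => Some(Event::CheckEvent(msg.ok())),
        }
    }

    fn main() {
        let (tx, rx): (Sender<StateChange>, Receiver<StateChange>) = unbounded();
        let (check_tx, check_rx) = unbounded::<String>();
        check_tx.send("diagnostic".to_owned()).unwrap();
        tx.send(StateChange::Restart).unwrap();
        // Even though check output arrived first, the state change wins.
        assert!(matches!(
            next_event(&rx, Some(&check_rx)),
            Some(Event::RequestStateChange(StateChange::Restart))
        ));
    }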
--- a/crates/project-model/src/workspace.rs
+++ b/crates/project-model/src/workspace.rs
@@ -237,7 +237,7 @@ impl ProjectWorkspace {
         };
 
         if let Some(sysroot) = &sysroot {
-            tracing::info!(src_root = %sysroot.src_root().display(), root = %sysroot.root().display(), "Using sysroot");
+            tracing::info!(workspace = %cargo_toml.display(), src_root = %sysroot.src_root().display(), root = %sysroot.root().display(), "Using sysroot");
         }
 
         let rustc_dir = match &config.rustc_source {
@@ -247,27 +247,31 @@ impl ProjectWorkspace {
             }
             None => None,
         };
-        if let Some(rustc_dir) = &rustc_dir {
-            tracing::info!(rustc_dir = %rustc_dir.display(), "Using rustc source");
-        }
 
         let rustc = match rustc_dir {
-            Some(rustc_dir) => match CargoWorkspace::fetch_metadata(
-                &rustc_dir,
-                cargo_toml.parent(),
-                config,
-                progress,
-            ) {
-                Ok(meta) => Some(CargoWorkspace::new(meta)),
-                Err(e) => {
-                    tracing::error!(
-                        %e,
-                        "Failed to read Cargo metadata from rustc source at {}",
-                        rustc_dir.display()
-                    );
-                    None
-                }
-            },
+            Some(rustc_dir) if rustc_dir == cargo_toml => {
+                tracing::info!(rustc_dir = %rustc_dir.display(), "Workspace is the rustc workspace itself, not adding the rustc workspace separately");
+                None
+            }
+            Some(rustc_dir) => {
+                tracing::info!(workspace = %cargo_toml.display(), rustc_dir = %rustc_dir.display(), "Using rustc source");
+                match CargoWorkspace::fetch_metadata(
+                    &rustc_dir,
+                    cargo_toml.parent(),
+                    config,
+                    progress,
+                ) {
+                    Ok(meta) => Some(CargoWorkspace::new(meta)),
+                    Err(e) => {
+                        tracing::error!(
+                            %e,
+                            "Failed to read Cargo metadata from rustc source at {}",
+                            rustc_dir.display()
+                        );
+                        None
+                    }
+                }
+            }
             None => None,
         };
 
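The key behavioral change in this hunk is the new `rustc_dir == cargo_toml` guard: when the workspace being opened is the rustc repository itself (as happens with `linkedProjects` pointing at rustc), loading rustc a second time as the private-dependency workspace would produce a second, conflicting set of source roots. A minimal sketch of that guard, with plain `std::path` types and a hypothetical function name standing in for the real `AbsPathBuf`/`ManifestPath` plumbing:

    use std::path::{Path, PathBuf};

    // Decide whether rustc should be loaded as an extra workspace; `None` means skip.
    fn rustc_workspace_to_load(rustc_dir: Option<PathBuf>, cargo_toml: &Path) -> Option<PathBuf> {
        match rustc_dir {
            // The project being opened *is* the rustc workspace: loading it again
            // would yield a duplicate, conflicting set of source roots.
            Some(dir) if dir == cargo_toml => None,
            other => other,
        }
    }

    fn main() {
        let rustc = Some(PathBuf::from("/src/rust"));
        assert_eq!(rustc_workspace_to_load(rustc, Path::new("/src/rust")), None);
    }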
--- a/crates/rust-analyzer/src/config.rs
+++ b/crates/rust-analyzer/src/config.rs
@@ -854,27 +854,27 @@ impl Config {
     }
     pub fn linked_projects(&self) -> Vec<LinkedProject> {
         match self.data.linkedProjects.as_slice() {
-            [] => match self.discovered_projects.as_ref() {
-                Some(discovered_projects) => {
-                    let exclude_dirs: Vec<_> = self
-                        .data
-                        .files_excludeDirs
-                        .iter()
-                        .map(|p| self.root_path.join(p))
-                        .collect();
-                    discovered_projects
-                        .iter()
-                        .filter(|p| {
-                            let (ProjectManifest::ProjectJson(path)
-                            | ProjectManifest::CargoToml(path)) = p;
-                            !exclude_dirs.iter().any(|p| path.starts_with(p))
-                        })
-                        .cloned()
-                        .map(LinkedProject::from)
-                        .collect()
-                }
-                None => Vec::new(),
-            },
+            [] => {
+                match self.discovered_projects.as_ref() {
+                    Some(discovered_projects) => {
+                        let exclude_dirs: Vec<_> = self
+                            .data
+                            .files_excludeDirs
+                            .iter()
+                            .map(|p| self.root_path.join(p))
+                            .collect();
+                        discovered_projects
+                            .iter()
+                            .filter(|(ProjectManifest::ProjectJson(path) | ProjectManifest::CargoToml(path))| {
+                                !exclude_dirs.iter().any(|p| path.starts_with(p))
+                            })
+                            .cloned()
+                            .map(LinkedProject::from)
+                            .collect()
+                    }
+                    None => Vec::new(),
+                }
+            }
             linked_projects => linked_projects
                 .iter()
                 .filter_map(|linked_project| match linked_project {
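Stylistically, the new `filter` above folds the two-variant destructuring into an or-pattern in the closure parameter itself, so both `ProjectManifest` variants bind the same `path` name without a separate `let`. A small self-contained sketch of that technique (or-patterns need Rust 1.53+), using a simplified stand-in for the real `ProjectManifest`:

    use std::path::PathBuf;

    enum ProjectManifest {
        ProjectJson(PathBuf),
        CargoToml(PathBuf),
    }

    // The or-pattern is irrefutable because it covers every variant, so it is
    // legal as a closure parameter; both arms bind `path` with the same type.
    fn paths(manifests: &[ProjectManifest]) -> Vec<&PathBuf> {
        manifests
            .iter()
            .map(|(ProjectManifest::ProjectJson(path) | ProjectManifest::CargoToml(path))| path)
            .collect()
    }

    fn main() {
        let ms = vec![
            ProjectManifest::CargoToml(PathBuf::from("a/Cargo.toml")),
            ProjectManifest::ProjectJson(PathBuf::from("b/rust-project.json")),
        ];
        assert_eq!(paths(&ms).len(), 2);
    }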
--- a/crates/rust-analyzer/src/reload.rs
+++ b/crates/rust-analyzer/src/reload.rs
@@ -12,17 +12,21 @@
 //! correct. Instead, we try to provide a best-effort service. Even if the
 //! project is currently loading and we don't have a full project model, we
 //! still want to respond to various requests.
-use std::{mem, sync::Arc};
+use std::{collections::hash_map::Entry, mem, sync::Arc};
 
 use flycheck::{FlycheckConfig, FlycheckHandle};
 use hir::db::DefDatabase;
 use ide::Change;
-use ide_db::base_db::{
-    CrateGraph, Env, ProcMacro, ProcMacroExpander, ProcMacroExpansionError, ProcMacroKind,
-    ProcMacroLoadResult, SourceRoot, VfsPath,
+use ide_db::{
+    base_db::{
+        CrateGraph, Env, ProcMacro, ProcMacroExpander, ProcMacroExpansionError, ProcMacroKind,
+        ProcMacroLoadResult, SourceRoot, VfsPath,
+    },
+    FxHashMap,
 };
 use itertools::Itertools;
 use proc_macro_api::{MacroDylib, ProcMacroServer};
-use project_model::{ProjectWorkspace, WorkspaceBuildScripts};
+use project_model::{PackageRoot, ProjectWorkspace, WorkspaceBuildScripts};
 use syntax::SmolStr;
 use vfs::{file_set::FileSetConfig, AbsPath, AbsPathBuf, ChangeKind};
 
@@ -494,7 +498,69 @@ impl ProjectFolders {
         let mut fsc = FileSetConfig::builder();
         let mut local_filesets = vec![];
 
-        for root in workspaces.iter().flat_map(|ws| ws.to_roots()) {
+        // Dedup source roots
+        // Depending on the project setup, we can have duplicated source roots, or for example in
+        // the case of the rustc workspace, we can end up with two source roots that are almost the
+        // same but not quite, like:
+        // PackageRoot { is_local: false, include: [AbsPathBuf(".../rust/src/tools/miri/cargo-miri")], exclude: [] }
+        // PackageRoot {
+        //     is_local: true,
+        //     include: [AbsPathBuf(".../rust/src/tools/miri/cargo-miri"), AbsPathBuf(".../rust/build/x86_64-pc-windows-msvc/stage0-tools/x86_64-pc-windows-msvc/release/build/cargo-miri-85801cd3d2d1dae4/out")],
+        //     exclude: [AbsPathBuf(".../rust/src/tools/miri/cargo-miri/.git"), AbsPathBuf(".../rust/src/tools/miri/cargo-miri/target")]
+        // }
+        //
+        // The first comes from the explicitly configured rustc workspace, which points at the rustc workspace itself.
+        // The second comes from the rustc workspace that we load as the actual project workspace.
+        // Roots whose `is_local` flags differ like this cause problems, especially when filtering diagnostics, as we don't report diagnostics for external libraries.
+        // So we need to deduplicate the roots. Usually deduplicating by `include` would be enough, but as the rustc example shows, it is not,
+        // so we also have to coalesce roots whose includes overlap.
+
+        let mut roots: Vec<_> = workspaces
+            .iter()
+            .flat_map(|ws| ws.to_roots())
+            .update(|root| root.include.sort())
+            .sorted_by(|a, b| a.include.cmp(&b.include))
+            .collect();
+
+        // map that tracks indices of overlapping roots
+        let mut overlap_map = FxHashMap::<_, Vec<_>>::default();
+        let mut done = false;
+
+        while !mem::replace(&mut done, true) {
+            // maps include paths to indices of the corresponding root
+            let mut include_to_idx = FxHashMap::default();
+            // Find and note down the indices of overlapping roots
+            for (idx, root) in roots.iter().filter(|it| !it.include.is_empty()).enumerate() {
+                for include in &root.include {
+                    match include_to_idx.entry(include) {
+                        Entry::Occupied(e) => {
+                            overlap_map.entry(*e.get()).or_default().push(idx);
+                        }
+                        Entry::Vacant(e) => {
+                            e.insert(idx);
+                        }
+                    }
+                }
+            }
+            for (k, v) in overlap_map.drain() {
+                done = false;
+                for v in v {
+                    let r = mem::replace(
+                        &mut roots[v],
+                        PackageRoot { is_local: false, include: vec![], exclude: vec![] },
+                    );
+                    roots[k].is_local |= r.is_local;
+                    roots[k].include.extend(r.include);
+                    roots[k].exclude.extend(r.exclude);
+                }
+                roots[k].include.sort();
+                roots[k].exclude.sort();
+                roots[k].include.dedup();
+                roots[k].exclude.dedup();
+            }
+        }
+
+        for root in roots.into_iter().filter(|it| !it.include.is_empty()) {
             let file_set_roots: Vec<VfsPath> =
                 root.include.iter().cloned().map(VfsPath::from).collect();
 
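To make the coalescing step concrete, here is a self-contained sketch of the same fixpoint idea, assuming a simplified `PackageRoot` with `String` paths and std's `HashMap` in place of `FxHashMap`; the function name `coalesce_roots` is hypothetical, as the real logic lives inline in `ProjectFolders::new`. The outer loop repeats because merging two roots can create a fresh overlap with a third; `main` replays the miri example from the comment above:

    use std::collections::hash_map::{Entry, HashMap};
    use std::mem;

    #[derive(Debug, Default)]
    struct PackageRoot {
        is_local: bool,
        include: Vec<String>,
        exclude: Vec<String>,
    }

    fn coalesce_roots(mut roots: Vec<PackageRoot>) -> Vec<PackageRoot> {
        let mut done = false;
        // Merging can create new overlaps, so repeat until a pass changes nothing.
        while !mem::replace(&mut done, true) {
            // Maps each include path to the index of the first root claiming it.
            let mut include_to_idx: HashMap<String, usize> = HashMap::new();
            // Maps a root index to the indices of roots overlapping it.
            let mut overlap_map: HashMap<usize, Vec<usize>> = HashMap::new();
            for (idx, root) in roots.iter().enumerate().filter(|(_, r)| !r.include.is_empty()) {
                for include in &root.include {
                    match include_to_idx.entry(include.clone()) {
                        // A second claimant for the same path marks an overlap.
                        Entry::Occupied(e) if *e.get() != idx => {
                            overlap_map.entry(*e.get()).or_default().push(idx);
                        }
                        Entry::Occupied(_) => {}
                        Entry::Vacant(e) => {
                            e.insert(idx);
                        }
                    }
                }
            }
            for (k, overlapping) in overlap_map {
                done = false;
                for v in overlapping {
                    // Drain the overlapping root into root `k`, leaving an empty
                    // husk that the final `retain` below discards.
                    let r = mem::take(&mut roots[v]);
                    roots[k].is_local |= r.is_local;
                    roots[k].include.extend(r.include);
                    roots[k].exclude.extend(r.exclude);
                }
                roots[k].include.sort();
                roots[k].include.dedup();
                roots[k].exclude.sort();
                roots[k].exclude.dedup();
            }
        }
        roots.retain(|r| !r.include.is_empty());
        roots
    }

    fn main() {
        let roots = vec![
            PackageRoot {
                is_local: false,
                include: vec!["rust/src/tools/miri/cargo-miri".into()],
                exclude: vec![],
            },
            PackageRoot {
                is_local: true,
                include: vec![
                    "rust/src/tools/miri/cargo-miri".into(),
                    "rust/build/release/build/cargo-miri/out".into(),
                ],
                exclude: vec!["rust/src/tools/miri/cargo-miri/.git".into()],
            },
        ];
        // The two roots collapse into a single local root covering both includes,
        // which is exactly what fixes the diagnostics filtering described above.
        let merged = coalesce_roots(roots);
        assert_eq!(merged.len(), 1);
        assert!(merged[0].is_local);
        println!("{merged:#?}");
    }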