//! cargo_check provides the functionality needed to run `cargo check` or
//! another compatible command (e.g. clippy) in a background thread and provide
//! LSP diagnostics based on the output of the command.
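//!
//! A rough usage sketch (illustrative only; `workspace_root` and the exact
//! config values are assumed by this example, not prescribed by the module):
//!
//! ```ignore
//! let config = FlycheckConfig::CargoCommand {
//!     command: "check".to_string(),
//!     all_targets: true,
//!     all_features: false,
//!     extra_args: Vec::new(),
//! };
//! let flycheck = Flycheck::new(config, workspace_root);
//! flycheck.update(); // schedule a (re)start of the check worker
//! while let Ok(task) = flycheck.task_recv.recv() {
//!     match task {
//!         CheckTask::ClearDiagnostics => { /* drop previously published diagnostics */ }
//!         CheckTask::AddDiagnostic { .. } => { /* translate into an LSP diagnostic */ }
//!         CheckTask::Status(_) => { /* forward progress to the client */ }
//!     }
//! }
//! // Dropping `flycheck` shuts the worker thread down.
//! ```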

use std::{
    io::{self, BufReader},
    path::PathBuf,
    process::{Command, Stdio},
    time::Instant,
};

use cargo_metadata::Message;
use crossbeam_channel::{never, select, unbounded, Receiver, RecvError, Sender};

pub use cargo_metadata::diagnostic::{
    Applicability, Diagnostic, DiagnosticLevel, DiagnosticSpan, DiagnosticSpanMacroExpansion,
};

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum FlycheckConfig {
    CargoCommand { command: String, all_targets: bool, all_features: bool, extra_args: Vec<String> },
    CustomCommand { command: String, args: Vec<String> },
}
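
// For orientation (a non-normative sketch, derived from `restart_check_process`
// below): a `CargoCommand { command: "check", all_targets: true, all_features: false,
// extra_args: vec![] }` configuration is spawned roughly as
//
//     cargo check --workspace --message-format=json --manifest-path <root>/Cargo.toml --all-targets
//
// while `CustomCommand { command, args }` runs the given command verbatim. In both
// cases the workspace root is used as the working directory.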

/// Flycheck wraps the shared state and communication machinery used for
/// running `cargo check` (or another compatible command) and providing
/// diagnostics based on the output.
/// The spawned thread is shut down when this struct is dropped.
#[derive(Debug)]
pub struct Flycheck {
    // XXX: drop order is significant
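    // (`cmd_send` is declared, and therefore dropped, before `handle`: closing the
    // command channel makes the worker's `cmd_recv` loop hit `RecvError` and exit,
    // so joining the thread when `handle` is dropped does not hang.)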
    cmd_send: Sender<CheckCommand>,
    handle: jod_thread::JoinHandle<()>,
    pub task_recv: Receiver<CheckTask>,
}

impl Flycheck {
    pub fn new(config: FlycheckConfig, workspace_root: PathBuf) -> Flycheck {
        let (task_send, task_recv) = unbounded::<CheckTask>();
        let (cmd_send, cmd_recv) = unbounded::<CheckCommand>();
        let handle = jod_thread::spawn(move || {
            FlycheckThread::new(config, workspace_root).run(&task_send, &cmd_recv);
        });
        Flycheck { task_recv, cmd_send, handle }
    }

    /// Schedule a re-start of the cargo check worker.
    pub fn update(&self) {
        self.cmd_send.send(CheckCommand::Update).unwrap();
    }
}

#[derive(Debug)]
pub enum CheckTask {
    /// Request a clearing of all cached diagnostics from the check watcher
    ClearDiagnostics,

    /// Request adding a diagnostic with fixes included to a file
    AddDiagnostic { workspace_root: PathBuf, diagnostic: Diagnostic },

    /// Request check progress notification to client
    Status(Status),
}

#[derive(Debug)]
pub enum Status {
    Being,
    Progress(String),
    End,
}

pub enum CheckCommand {
    /// Request re-start of check thread
    Update,
}

struct FlycheckThread {
    config: FlycheckConfig,
    workspace_root: PathBuf,
    last_update_req: Option<Instant>,
    // XXX: drop order is significant
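    // (`message_recv` is declared, and therefore dropped, before `check_process`:
    // once the receiver is gone, sends from the watcher thread fail, `run_cargo`'s
    // callback returns `false` and the thread winds down, so dropping/joining
    // `check_process` does not hang.)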
    message_recv: Receiver<CheckEvent>,
    /// WatchThread exists to wrap around the communication needed to be able to
    /// run `cargo check` without blocking. Currently the Rust standard library
    /// doesn't provide a way to read sub-process output without blocking, so we
    /// have to wrap sub-process output handling in a thread and pass messages
    /// back over a channel.
    check_process: Option<jod_thread::JoinHandle<()>>,
}

impl FlycheckThread {
    fn new(config: FlycheckConfig, workspace_root: PathBuf) -> FlycheckThread {
        FlycheckThread {
            config,
            workspace_root,
            last_update_req: None,
            message_recv: never(),
            check_process: None,
        }
    }

    fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) {
        // If we rerun the thread, we need to discard the previous check results first
        self.clean_previous_results(task_send);

        loop {
            select! {
                recv(&cmd_recv) -> cmd => match cmd {
                    Ok(cmd) => self.handle_command(cmd),
                    Err(RecvError) => {
                        // Command channel has closed, so shut down
                        break;
                    },
                },
                recv(self.message_recv) -> msg => match msg {
                    Ok(msg) => self.handle_message(msg, task_send),
                    Err(RecvError) => {
                        // Watcher finished, replace it with a never channel to
                        // avoid busy-waiting.
                        self.message_recv = never();
                        self.check_process = None;
                    },
                }
            };

            if self.should_recheck() {
                self.last_update_req = None;
                task_send.send(CheckTask::ClearDiagnostics).unwrap();
                self.restart_check_process();
            }
        }
    }

    fn clean_previous_results(&self, task_send: &Sender<CheckTask>) {
        task_send.send(CheckTask::ClearDiagnostics).unwrap();
        task_send.send(CheckTask::Status(Status::End)).unwrap();
    }

    fn should_recheck(&mut self) -> bool {
        if let Some(_last_update_req) = &self.last_update_req {
            // We currently only request an update on save, as we need up-to-date
            // source on disk for cargo check to do its magic, so we don't really
            // need to debounce the requests at this point.
            return true;
        }
        false
    }

    fn handle_command(&mut self, cmd: CheckCommand) {
        match cmd {
            CheckCommand::Update => self.last_update_req = Some(Instant::now()),
        }
    }

    fn handle_message(&self, msg: CheckEvent, task_send: &Sender<CheckTask>) {
        match msg {
            CheckEvent::Begin => {
                task_send.send(CheckTask::Status(Status::Being)).unwrap();
            }

            CheckEvent::End => {
                task_send.send(CheckTask::Status(Status::End)).unwrap();
            }

            CheckEvent::Msg(Message::CompilerArtifact(msg)) => {
                task_send.send(CheckTask::Status(Status::Progress(msg.target.name))).unwrap();
            }

            CheckEvent::Msg(Message::CompilerMessage(msg)) => {
                task_send
                    .send(CheckTask::AddDiagnostic {
                        workspace_root: self.workspace_root.clone(),
                        diagnostic: msg.message,
                    })
                    .unwrap();
            }

            CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {}
            CheckEvent::Msg(Message::BuildFinished(_)) => {}
            CheckEvent::Msg(Message::TextLine(_)) => {}
            CheckEvent::Msg(Message::Unknown) => {}
        }
    }
|
2020-03-31 17:39:50 -05:00
|
|
|
|
|
|
|
fn restart_check_process(&mut self) {
|
|
|
|
// First, clear and cancel the old thread
|
|
|
|
self.message_recv = never();
|
|
|
|
self.check_process = None;
|
|
|
|
|
2020-04-01 05:31:42 -05:00
|
|
|
let mut cmd = match &self.config {
|
2020-05-05 15:44:39 -05:00
|
|
|
FlycheckConfig::CargoCommand { command, all_targets, all_features, extra_args } => {
|
2020-05-08 07:54:29 -05:00
|
|
|
let mut cmd = Command::new(ra_toolchain::cargo());
|
2020-04-01 05:31:42 -05:00
|
|
|
cmd.arg(command);
|
2020-05-08 07:54:29 -05:00
|
|
|
cmd.args(&["--workspace", "--message-format=json", "--manifest-path"])
|
|
|
|
.arg(self.workspace_root.join("Cargo.toml"));
|
2020-04-01 05:31:42 -05:00
|
|
|
if *all_targets {
|
|
|
|
cmd.arg("--all-targets");
|
|
|
|
}
|
2020-05-05 15:44:39 -05:00
|
|
|
if *all_features {
|
|
|
|
cmd.arg("--all-features");
|
|
|
|
}
|
2020-04-01 05:31:42 -05:00
|
|
|
cmd.args(extra_args);
|
|
|
|
cmd
|
|
|
|
}
|
|
|
|
FlycheckConfig::CustomCommand { command, args } => {
|
|
|
|
let mut cmd = Command::new(command);
|
|
|
|
cmd.args(args);
|
|
|
|
cmd
|
2020-04-01 05:03:06 -05:00
|
|
|
}
|
|
|
|
};
|
2020-04-01 05:31:42 -05:00
|
|
|
cmd.current_dir(&self.workspace_root);
|
2020-03-31 17:39:50 -05:00
|
|
|
|
|
|
|
let (message_send, message_recv) = unbounded();
|
|
|
|
self.message_recv = message_recv;
|
|
|
|
self.check_process = Some(jod_thread::spawn(move || {
|
|
|
|
// If we trigger an error here, we will do so in the loop instead,
|
|
|
|
// which will break out of the loop, and continue the shutdown
|
|
|
|
let _ = message_send.send(CheckEvent::Begin);
|
|
|
|
|
2020-04-01 05:03:06 -05:00
|
|
|
let res = run_cargo(cmd, &mut |message| {
|
2020-03-31 17:39:50 -05:00
|
|
|
// Skip certain kinds of messages to only spend time on what's useful
|
|
|
|
match &message {
|
|
|
|
Message::CompilerArtifact(artifact) if artifact.fresh => return true,
|
|
|
|
Message::BuildScriptExecuted(_) => return true,
|
|
|
|
Message::Unknown => return true,
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
|
|
|
|
// if the send channel was closed, we want to shutdown
|
|
|
|
message_send.send(CheckEvent::Msg(message)).is_ok()
|
|
|
|
});
|
|
|
|
|
|
|
|
if let Err(err) = res {
|
|
|
|
// FIXME: make the `message_send` to be `Sender<Result<CheckEvent, CargoError>>`
|
|
|
|
// to display user-caused misconfiguration errors instead of just logging them here
|
|
|
|
log::error!("Cargo watcher failed {:?}", err);
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can ignore any error here, as we are already in the progress
|
|
|
|
// of shutting down.
|
|
|
|
let _ = message_send.send(CheckEvent::End);
|
|
|
|
}))
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}

enum CheckEvent {
    Begin,
    Msg(cargo_metadata::Message),
    End,
}

fn run_cargo(
    mut command: Command,
    on_message: &mut dyn FnMut(cargo_metadata::Message) -> bool,
) -> io::Result<()> {
    let mut child =
        command.stdout(Stdio::piped()).stderr(Stdio::null()).stdin(Stdio::null()).spawn()?;

    // We manually read a line at a time, instead of using serde's stream
    // deserializers, because a stream deserializer cannot recover from an error,
    // it just gets stuck, and we want to be resilient against failures.
    //
    // Because cargo only outputs one JSON object per line, we can simply skip a
    // line if it doesn't parse, which just ignores any erroneous output.
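    //
    // For reference, every line is a single JSON object keyed by `"reason"`,
    // e.g. (abridged):
    //
    //     {"reason":"compiler-artifact","target":{...},"fresh":true}
    //     {"reason":"compiler-message","target":{...},"message":{...}}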
    let stdout = BufReader::new(child.stdout.take().unwrap());
    let mut read_at_least_one_message = false;
    for message in cargo_metadata::Message::parse_stream(stdout) {
        let message = match message {
            Ok(message) => message,
            Err(err) => {
                log::error!("Invalid json from cargo check, ignoring ({})", err);
                continue;
            }
        };

        read_at_least_one_message = true;

        if !on_message(message) {
            break;
        }
    }

    // It is okay to ignore the result, as it only errors if the process is already dead
    let _ = child.kill();

    let exit_status = child.wait()?;
    if !exit_status.success() && !read_at_least_one_message {
        // FIXME: Read the stderr to display the reason, see `read2()` reference in PR comment:
        // https://github.com/rust-analyzer/rust-analyzer/pull/3632#discussion_r395605298
        return Err(io::Error::new(
            io::ErrorKind::Other,
            format!(
                "the command produced no valid metadata (exit code: {:?}): {:?}",
                exit_status, command
            ),
        ));
    }

    Ok(())
}