2019-12-27 04:10:07 -06:00
|
|
|
//! cargo_check provides the functionality needed to run `cargo check` or
|
|
|
|
//! another compatible command (f.x. clippy) in a background thread and provide
|
|
|
|
//! LSP diagnostics based on the output of the command.
|
|
|
|
use cargo_metadata::Message;
|
2019-12-27 10:29:02 -06:00
|
|
|
use crossbeam_channel::{never, select, unbounded, Receiver, RecvError, Sender};
|
2019-12-27 04:10:07 -06:00
|
|
|
use lsp_types::{
|
2020-01-31 12:23:25 -06:00
|
|
|
CodeAction, CodeActionOrCommand, Diagnostic, Url, WorkDoneProgress, WorkDoneProgressBegin,
|
|
|
|
WorkDoneProgressEnd, WorkDoneProgressReport,
|
2019-12-27 04:10:07 -06:00
|
|
|
};
|
|
|
|
use std::{
|
2020-01-29 06:40:27 -06:00
|
|
|
io::{BufRead, BufReader},
|
2020-03-16 07:43:29 -05:00
|
|
|
path::{Path, PathBuf},
|
|
|
|
process::{Child, Command, Stdio},
|
2019-12-27 04:10:07 -06:00
|
|
|
thread::JoinHandle,
|
|
|
|
time::Instant,
|
|
|
|
};
|
|
|
|
|
|
|
|
mod conv;
|
|
|
|
|
2020-01-31 12:23:25 -06:00
|
|
|
use crate::conv::{map_rust_diagnostic_to_lsp, MappedRustDiagnostic};
|
2019-12-27 04:10:07 -06:00
|
|
|
|
2019-12-29 11:27:14 -06:00
|
|
|
pub use crate::conv::url_from_path_with_drive_lowercasing;
|
|
|
|
|
2019-12-27 04:10:07 -06:00
|
|
|
/// Options controlling how (and whether) the background cargo command runs.
#[derive(Clone, Debug)]
pub struct CheckOptions {
    // When false, no cargo process is spawned at all (see WatchThread::new).
    pub enable: bool,
    // Extra arguments appended to the cargo command line after the built-in ones.
    pub args: Vec<String>,
    // The cargo subcommand to run, e.g. "check" or "clippy".
    pub command: String,
    // When true, "--all-targets" is passed to cargo.
    pub all_targets: bool,
}
|
|
|
|
|
|
|
|
/// CheckWatcher wraps the shared state and communication machinery used for
/// running `cargo check` (or other compatible command) and providing
/// diagnostics based on the output.
/// The spawned thread is shut down when this struct is dropped.
#[derive(Debug)]
pub struct CheckWatcher {
    // Receives CheckTasks produced by the background worker thread.
    pub task_recv: Receiver<CheckTask>,
    // Sends commands to the worker; None for a dummy watcher. Dropping the
    // sender closes the channel and lets the worker loop terminate.
    cmd_send: Option<Sender<CheckCommand>>,
    // Handle of the spawned worker thread; None for a dummy watcher.
    handle: Option<JoinHandle<()>>,
}
|
|
|
|
|
|
|
|
impl CheckWatcher {
|
|
|
|
pub fn new(options: &CheckOptions, workspace_root: PathBuf) -> CheckWatcher {
|
|
|
|
let options = options.clone();
|
|
|
|
|
|
|
|
let (task_send, task_recv) = unbounded::<CheckTask>();
|
|
|
|
let (cmd_send, cmd_recv) = unbounded::<CheckCommand>();
|
|
|
|
let handle = std::thread::spawn(move || {
|
2020-01-15 08:53:08 -06:00
|
|
|
let mut check = CheckWatcherThread::new(options, workspace_root);
|
2019-12-27 04:10:07 -06:00
|
|
|
check.run(&task_send, &cmd_recv);
|
|
|
|
});
|
2020-01-31 12:23:25 -06:00
|
|
|
CheckWatcher { task_recv, cmd_send: Some(cmd_send), handle: Some(handle) }
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
|
2020-01-11 14:32:40 -06:00
|
|
|
/// Returns a CheckWatcher that doesn't actually do anything
|
|
|
|
pub fn dummy() -> CheckWatcher {
|
2020-01-31 12:23:25 -06:00
|
|
|
CheckWatcher { task_recv: never(), cmd_send: None, handle: None }
|
2020-01-11 14:32:40 -06:00
|
|
|
}
|
|
|
|
|
2019-12-27 04:10:07 -06:00
|
|
|
/// Schedule a re-start of the cargo check worker.
|
|
|
|
pub fn update(&self) {
|
2019-12-27 05:42:18 -06:00
|
|
|
if let Some(cmd_send) = &self.cmd_send {
|
|
|
|
cmd_send.send(CheckCommand::Update).unwrap();
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-27 04:43:05 -06:00
|
|
|
impl std::ops::Drop for CheckWatcher {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(handle) = self.handle.take() {
|
2019-12-27 05:42:18 -06:00
|
|
|
// Take the sender out of the option
|
|
|
|
let recv = self.cmd_send.take();
|
2019-12-27 04:43:05 -06:00
|
|
|
|
2019-12-27 05:42:18 -06:00
|
|
|
// Dropping the sender finishes the thread loop
|
2019-12-27 04:43:05 -06:00
|
|
|
drop(recv);
|
|
|
|
|
|
|
|
// Join the thread, it should finish shortly. We don't really care
|
|
|
|
// whether it panicked, so it is safe to ignore the result
|
|
|
|
let _ = handle.join();
|
|
|
|
}
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Messages produced by the check watcher thread for the consumer of
/// `CheckWatcher::task_recv`.
#[derive(Debug)]
pub enum CheckTask {
    /// Request a clearing of all cached diagnostics from the check watcher
    ClearDiagnostics,

    /// Request adding a diagnostic with fixes included to a file
    AddDiagnostic { url: Url, diagnostic: Diagnostic, fixes: Vec<CodeActionOrCommand> },

    /// Request check progress notification to client
    Status(WorkDoneProgress),
}
|
|
|
|
|
|
|
|
/// Commands sent from the main side (via `CheckWatcher::update`) to the
/// check watcher thread.
pub enum CheckCommand {
    /// Request re-start of check thread
    Update,
}
|
|
|
|
|
2020-01-15 08:53:08 -06:00
|
|
|
/// State owned by the background worker thread: it holds the currently
/// running cargo watcher (if any) and restarts it when an update is requested.
struct CheckWatcherThread {
    // Options the cargo command line is built from.
    options: CheckOptions,
    // Root directory whose Cargo.toml is passed to cargo via --manifest-path.
    workspace_root: PathBuf,
    // The active cargo watcher; a dummy when nothing is running.
    watcher: WatchThread,
    // Time of the last Update command; Some means a recheck is pending.
    last_update_req: Option<Instant>,
}
|
|
|
|
|
2020-01-15 08:53:08 -06:00
|
|
|
impl CheckWatcherThread {
|
|
|
|
fn new(options: CheckOptions, workspace_root: PathBuf) -> CheckWatcherThread {
|
|
|
|
CheckWatcherThread {
|
2020-01-13 10:12:14 -06:00
|
|
|
options,
|
|
|
|
workspace_root,
|
|
|
|
watcher: WatchThread::dummy(),
|
|
|
|
last_update_req: None,
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
|
2019-12-29 08:07:53 -06:00
|
|
|
fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) {
|
2019-12-27 04:43:05 -06:00
|
|
|
loop {
|
2019-12-27 04:10:07 -06:00
|
|
|
select! {
|
|
|
|
recv(&cmd_recv) -> cmd => match cmd {
|
|
|
|
Ok(cmd) => self.handle_command(cmd),
|
|
|
|
Err(RecvError) => {
|
|
|
|
// Command channel has closed, so shut down
|
2019-12-27 04:43:05 -06:00
|
|
|
break;
|
2019-12-27 04:10:07 -06:00
|
|
|
},
|
|
|
|
},
|
|
|
|
recv(self.watcher.message_recv) -> msg => match msg {
|
|
|
|
Ok(msg) => self.handle_message(msg, task_send),
|
2019-12-27 04:43:05 -06:00
|
|
|
Err(RecvError) => {
|
2019-12-27 10:29:02 -06:00
|
|
|
// Watcher finished, replace it with a never channel to
|
|
|
|
// avoid busy-waiting.
|
|
|
|
std::mem::replace(&mut self.watcher.message_recv, never());
|
2019-12-27 04:43:05 -06:00
|
|
|
},
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if self.should_recheck() {
|
|
|
|
self.last_update_req.take();
|
2020-01-15 08:50:49 -06:00
|
|
|
task_send.send(CheckTask::ClearDiagnostics).unwrap();
|
2019-12-27 04:10:07 -06:00
|
|
|
|
2020-01-28 07:48:50 -06:00
|
|
|
// Replace with a dummy watcher first so we drop the original and wait for completion
|
|
|
|
std::mem::replace(&mut self.watcher, WatchThread::dummy());
|
|
|
|
|
|
|
|
// Then create the actual new watcher
|
2019-12-27 04:10:07 -06:00
|
|
|
self.watcher = WatchThread::new(&self.options, &self.workspace_root);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn should_recheck(&mut self) -> bool {
|
|
|
|
if let Some(_last_update_req) = &self.last_update_req {
|
|
|
|
// We currently only request an update on save, as we need up to
|
|
|
|
// date source on disk for cargo check to do it's magic, so we
|
|
|
|
// don't really need to debounce the requests at this point.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
|
|
|
fn handle_command(&mut self, cmd: CheckCommand) {
|
|
|
|
match cmd {
|
|
|
|
CheckCommand::Update => self.last_update_req = Some(Instant::now()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-15 08:50:49 -06:00
|
|
|
fn handle_message(&self, msg: CheckEvent, task_send: &Sender<CheckTask>) {
|
2019-12-27 04:10:07 -06:00
|
|
|
match msg {
|
|
|
|
CheckEvent::Begin => {
|
|
|
|
task_send
|
|
|
|
.send(CheckTask::Status(WorkDoneProgress::Begin(WorkDoneProgressBegin {
|
|
|
|
title: "Running 'cargo check'".to_string(),
|
|
|
|
cancellable: Some(false),
|
|
|
|
message: None,
|
|
|
|
percentage: None,
|
|
|
|
})))
|
|
|
|
.unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
CheckEvent::End => {
|
|
|
|
task_send
|
|
|
|
.send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd {
|
|
|
|
message: None,
|
|
|
|
})))
|
|
|
|
.unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
CheckEvent::Msg(Message::CompilerArtifact(msg)) => {
|
|
|
|
task_send
|
|
|
|
.send(CheckTask::Status(WorkDoneProgress::Report(WorkDoneProgressReport {
|
|
|
|
cancellable: Some(false),
|
|
|
|
message: Some(msg.target.name),
|
|
|
|
percentage: None,
|
|
|
|
})))
|
|
|
|
.unwrap();
|
|
|
|
}
|
|
|
|
|
|
|
|
CheckEvent::Msg(Message::CompilerMessage(msg)) => {
|
2020-03-12 09:24:20 -05:00
|
|
|
let map_result = map_rust_diagnostic_to_lsp(&msg.message, &self.workspace_root);
|
|
|
|
if map_result.is_empty() {
|
|
|
|
return;
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
|
2020-03-12 09:24:20 -05:00
|
|
|
for MappedRustDiagnostic { location, diagnostic, fixes } in map_result {
|
|
|
|
let fixes = fixes
|
|
|
|
.into_iter()
|
|
|
|
.map(|fix| {
|
|
|
|
CodeAction { diagnostics: Some(vec![diagnostic.clone()]), ..fix }.into()
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
task_send
|
|
|
|
.send(CheckTask::AddDiagnostic { url: location.uri, diagnostic, fixes })
|
|
|
|
.unwrap();
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {}
|
|
|
|
CheckEvent::Msg(Message::Unknown) => {}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-15 08:50:49 -06:00
|
|
|
/// A diagnostic paired with the code actions that can fix it.
// NOTE(review): not referenced anywhere else in this file — presumably
// consumed by other modules of the crate; confirm before removing.
#[derive(Debug)]
pub struct DiagnosticWithFixes {
    diagnostic: Diagnostic,
    fixes: Vec<CodeAction>,
}
|
|
|
|
|
2019-12-27 04:10:07 -06:00
|
|
|
/// WatchThread exists to wrap around the communication needed to be able to
/// run `cargo check` without blocking. Currently the Rust standard library
/// doesn't provide a way to read sub-process output without blocking, so we
/// have to wrap sub-processes output handling in a thread and pass messages
/// back over a channel.
/// The correct way to dispose of the thread is to drop it, on which the
/// sub-process will be killed, and the thread will be joined.
struct WatchThread {
    // Join handle of the reader thread; None for a dummy watcher or when
    // checking is disabled.
    handle: Option<JoinHandle<()>>,
    // Receives CheckEvents emitted by the reader thread.
    message_recv: Receiver<CheckEvent>,
}
|
|
|
|
|
|
|
|
/// Events emitted by the cargo reader thread over `WatchThread::message_recv`.
enum CheckEvent {
    // The cargo process is about to be started.
    Begin,
    // One message parsed from cargo's JSON output.
    Msg(cargo_metadata::Message),
    // The cargo run is over (sent just before the reader thread exits).
    End,
}
|
|
|
|
|
2020-03-16 07:43:29 -05:00
|
|
|
pub fn run_cargo(
|
|
|
|
args: &[String],
|
|
|
|
current_dir: Option<&Path>,
|
|
|
|
mut on_message: impl FnMut(cargo_metadata::Message) -> bool,
|
|
|
|
) -> Child {
|
|
|
|
let mut command = Command::new("cargo");
|
|
|
|
if let Some(current_dir) = current_dir {
|
|
|
|
command.current_dir(current_dir);
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut child = command
|
|
|
|
.args(args)
|
|
|
|
.stdout(Stdio::piped())
|
|
|
|
.stderr(Stdio::null())
|
|
|
|
.stdin(Stdio::null())
|
|
|
|
.spawn()
|
|
|
|
.expect("couldn't launch cargo");
|
|
|
|
|
|
|
|
// We manually read a line at a time, instead of using serde's
|
|
|
|
// stream deserializers, because the deserializer cannot recover
|
|
|
|
// from an error, resulting in it getting stuck, because we try to
|
|
|
|
// be resillient against failures.
|
|
|
|
//
|
|
|
|
// Because cargo only outputs one JSON object per line, we can
|
|
|
|
// simply skip a line if it doesn't parse, which just ignores any
|
|
|
|
// erroneus output.
|
|
|
|
let stdout = BufReader::new(child.stdout.take().unwrap());
|
|
|
|
for line in stdout.lines() {
|
|
|
|
let line = match line {
|
|
|
|
Ok(line) => line,
|
|
|
|
Err(err) => {
|
|
|
|
log::error!("Couldn't read line from cargo: {}", err);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let message = serde_json::from_str::<cargo_metadata::Message>(&line);
|
|
|
|
let message = match message {
|
|
|
|
Ok(message) => message,
|
|
|
|
Err(err) => {
|
|
|
|
log::error!("Invalid json from cargo check, ignoring ({}): {:?} ", err, line);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if !on_message(message) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
child
|
|
|
|
}
|
|
|
|
|
2019-12-27 04:10:07 -06:00
|
|
|
impl WatchThread {
|
2020-01-13 10:12:14 -06:00
|
|
|
fn dummy() -> WatchThread {
|
|
|
|
WatchThread { handle: None, message_recv: never() }
|
|
|
|
}
|
|
|
|
|
2020-03-16 07:43:29 -05:00
|
|
|
fn new(options: &CheckOptions, workspace_root: &Path) -> WatchThread {
|
2019-12-27 04:10:07 -06:00
|
|
|
let mut args: Vec<String> = vec![
|
|
|
|
options.command.clone(),
|
2020-03-13 04:55:23 -05:00
|
|
|
"--workspace".to_string(),
|
2019-12-27 04:10:07 -06:00
|
|
|
"--message-format=json".to_string(),
|
|
|
|
"--manifest-path".to_string(),
|
2020-03-16 07:43:29 -05:00
|
|
|
format!("{}/Cargo.toml", workspace_root.display()),
|
2019-12-27 04:10:07 -06:00
|
|
|
];
|
|
|
|
if options.all_targets {
|
|
|
|
args.push("--all-targets".to_string());
|
|
|
|
}
|
|
|
|
args.extend(options.args.iter().cloned());
|
|
|
|
|
|
|
|
let (message_send, message_recv) = unbounded();
|
2020-03-16 07:43:29 -05:00
|
|
|
let workspace_root = workspace_root.to_owned();
|
|
|
|
let handle = if options.enable {
|
|
|
|
Some(std::thread::spawn(move || {
|
|
|
|
// If we trigger an error here, we will do so in the loop instead,
|
|
|
|
// which will break out of the loop, and continue the shutdown
|
|
|
|
let _ = message_send.send(CheckEvent::Begin);
|
|
|
|
|
|
|
|
let mut child = run_cargo(&args, Some(&workspace_root), |message| {
|
|
|
|
// Skip certain kinds of messages to only spend time on what's useful
|
|
|
|
match &message {
|
|
|
|
Message::CompilerArtifact(artifact) if artifact.fresh => return true,
|
|
|
|
Message::BuildScriptExecuted(_) => return true,
|
|
|
|
Message::Unknown => return true,
|
|
|
|
_ => {}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
2020-01-28 07:33:52 -06:00
|
|
|
|
2020-03-16 07:43:29 -05:00
|
|
|
match message_send.send(CheckEvent::Msg(message)) {
|
|
|
|
Ok(()) => {}
|
|
|
|
Err(_err) => {
|
|
|
|
// The send channel was closed, so we want to shutdown
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
true
|
|
|
|
});
|
|
|
|
|
|
|
|
// We can ignore any error here, as we are already in the progress
|
|
|
|
// of shutting down.
|
|
|
|
let _ = message_send.send(CheckEvent::End);
|
|
|
|
|
|
|
|
// It is okay to ignore the result, as it only errors if the process is already dead
|
|
|
|
let _ = child.kill();
|
|
|
|
|
|
|
|
// Again, we don't care about the exit status so just ignore the result
|
|
|
|
let _ = child.wait();
|
|
|
|
}))
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
WatchThread { handle, message_recv }
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
2019-12-27 04:31:25 -06:00
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
|
2019-12-27 04:31:25 -06:00
|
|
|
impl std::ops::Drop for WatchThread {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if let Some(handle) = self.handle.take() {
|
|
|
|
// Replace our reciever with dummy one, so we can drop and close the
|
|
|
|
// one actually communicating with the thread
|
2019-12-27 10:29:02 -06:00
|
|
|
let recv = std::mem::replace(&mut self.message_recv, never());
|
2019-12-27 04:31:25 -06:00
|
|
|
|
|
|
|
// Dropping the original reciever initiates thread sub-process shutdown
|
|
|
|
drop(recv);
|
|
|
|
|
|
|
|
// Join the thread, it should finish shortly. We don't really care
|
|
|
|
// whether it panicked, so it is safe to ignore the result
|
|
|
|
let _ = handle.join();
|
|
|
|
}
|
2019-12-27 04:10:07 -06:00
|
|
|
}
|
|
|
|
}
|