2021-08-01 00:47:21 -05:00
|
|
|
use std::{
|
|
|
|
ops::Deref,
|
|
|
|
path::{Path, PathBuf},
|
|
|
|
};
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but let's start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Tough question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
use base_db::{CrateGraph, FileId};
|
2021-08-23 12:18:11 -05:00
|
|
|
use cfg::{CfgAtom, CfgDiff};
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
use expect_test::{expect, Expect};
|
2021-08-20 08:56:02 -05:00
|
|
|
use paths::{AbsPath, AbsPathBuf};
|
2021-08-01 00:47:21 -05:00
|
|
|
use serde::de::DeserializeOwned;
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
use crate::{
|
2022-09-19 10:31:08 -05:00
|
|
|
CargoWorkspace, CfgOverrides, ProjectJson, ProjectJsonData, ProjectWorkspace, Sysroot,
|
|
|
|
WorkspaceBuildScripts,
|
2021-08-01 00:47:21 -05:00
|
|
|
};
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
fn load_cargo(file: &str) -> CrateGraph {
|
2021-08-23 12:18:11 -05:00
|
|
|
load_cargo_with_overrides(file, CfgOverrides::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Loads the given `cargo metadata` JSON fixture, builds a mock cargo
/// workspace around it, and lowers that to a `CrateGraph` with the
/// provided cfg overrides applied.
fn load_cargo_with_overrides(file: &str, cfg_overrides: CfgOverrides) -> CrateGraph {
    let meta = get_test_json_file(file);
    let cargo_workspace = CargoWorkspace::new(meta);
    let project_workspace = ProjectWorkspace::Cargo {
        cargo: cargo_workspace,
        // No build scripts are run in tests.
        build_scripts: WorkspaceBuildScripts::default(),
        // No sysroot / rustc workspace: tests only exercise the fixture crates.
        sysroot: None,
        rustc: None,
        rustc_cfg: Vec::new(),
        cfg_overrides,
        toolchain: None,
        // Tests never query rustc, so the target's data layout is unavailable.
        target_layout: Err("target_data_layout not loaded".into()),
    };
    to_crate_graph(project_workspace)
}
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
fn load_rust_project(file: &str) -> CrateGraph {
|
|
|
|
let data = get_test_json_file(file);
|
|
|
|
let project = rooted_project_json(data);
|
|
|
|
let sysroot = Some(get_fake_sysroot());
|
|
|
|
let project_workspace = ProjectWorkspace::Json { project, sysroot, rustc_cfg: Vec::new() };
|
|
|
|
to_crate_graph(project_workspace)
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
}
|
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
/// Reads a JSON fixture from the crate's `test_data/` directory, rewrites
/// every `$ROOT$` placeholder string into a platform-appropriate absolute
/// root, and deserializes the result into `T`.
///
/// Panics if the file is missing, is not valid JSON, or does not match `T`.
fn get_test_json_file<T: DeserializeOwned>(file: &str) -> T {
    let file = get_test_path(file);
    let data = std::fs::read_to_string(file).unwrap();
    let mut json = data.parse::<serde_json::Value>().unwrap();
    fixup_paths(&mut json);
    return serde_json::from_value(json).unwrap();

    // Recursively walks the JSON value and expands `$ROOT$` in every string.
    fn fixup_paths(val: &mut serde_json::Value) {
        match val {
            serde_json::Value::String(s) => replace_root(s, true),
            serde_json::Value::Array(vals) => vals.iter_mut().for_each(fixup_paths),
            serde_json::Value::Object(kvals) => kvals.values_mut().for_each(fixup_paths),
            // Scalars other than strings contain no paths; nothing to do.
            serde_json::Value::Null | serde_json::Value::Bool(_) | serde_json::Value::Number(_) => {
            }
        }
    }
}
|
|
|
|
|
|
|
|
/// Translates between the `$ROOT$` placeholder and a concrete root path.
///
/// `direction == true` expands `$ROOT$` into the platform root;
/// `direction == false` folds the root back into `$ROOT$`. On Windows the
/// two directions use differently escaped literals because the reverse
/// pass matches against Debug-formatted output rather than raw paths.
fn replace_root(s: &mut String, direction: bool) {
    let (pattern, replacement) = if direction {
        ("$ROOT$", if cfg!(windows) { r#"C:\\ROOT\"# } else { "/ROOT/" })
    } else {
        (if cfg!(windows) { r#"C:\\\\ROOT\\"# } else { "/ROOT/" }, "$ROOT$")
    };
    *s = s.replace(pattern, replacement)
}
|
|
|
|
|
2021-08-01 00:47:21 -05:00
|
|
|
fn get_test_path(file: &str) -> PathBuf {
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
let base = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
2021-08-01 00:47:21 -05:00
|
|
|
base.join("test_data").join(file)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_fake_sysroot() -> Sysroot {
|
|
|
|
let sysroot_path = get_test_path("fake-sysroot");
|
2022-07-25 09:07:41 -05:00
|
|
|
// there's no `libexec/` directory with a `proc-macro-srv` binary in that
|
|
|
|
// fake sysroot, so we give them both the same path:
|
|
|
|
let sysroot_dir = AbsPathBuf::assert(sysroot_path);
|
|
|
|
let sysroot_src_dir = sysroot_dir.clone();
|
2023-01-27 06:49:28 -06:00
|
|
|
Sysroot::load(sysroot_dir, sysroot_src_dir)
|
2021-08-01 00:47:21 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
fn rooted_project_json(data: ProjectJsonData) -> ProjectJson {
|
|
|
|
let mut root = "$ROOT$".to_string();
|
|
|
|
replace_root(&mut root, true);
|
|
|
|
let path = Path::new(&root);
|
|
|
|
let base = AbsPath::assert(path);
|
|
|
|
ProjectJson::new(base, data)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Lowers a `ProjectWorkspace` to a `CrateGraph` with IO stubbed out:
/// build-script/proc-macro loading yields nothing, and the path-to-file
/// mapping hands out sequential `FileId`s instead of touching a real VFS.
fn to_crate_graph(project_workspace: ProjectWorkspace) -> CrateGraph {
    project_workspace.to_crate_graph(
        // Stub loader: no proc macros are ever produced in these tests.
        &mut |_, _| Ok(Vec::new()),
        &mut {
            // Returns a fresh FileId on every call, starting at 1.
            // NOTE(review): repeated lookups of the same path get distinct
            // ids — presumably fine for these fixtures; confirm if reused.
            let mut counter = 0;
            move |_path| {
                counter += 1;
                Some(FileId(counter))
            }
        },
        &Default::default(),
    )
}
|
|
|
|
|
|
|
|
fn check_crate_graph(crate_graph: CrateGraph, expect: Expect) {
|
2022-12-23 12:42:58 -06:00
|
|
|
let mut crate_graph = format!("{crate_graph:#?}");
|
2021-08-01 00:47:21 -05:00
|
|
|
replace_root(&mut crate_graph, false);
|
|
|
|
expect.assert_eq(&crate_graph);
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
}
|
|
|
|
|
2021-08-23 12:18:11 -05:00
|
|
|
#[test]
|
|
|
|
fn cargo_hello_world_project_model_with_wildcard_overrides() {
|
|
|
|
let cfg_overrides = CfgOverrides::Wildcard(
|
|
|
|
CfgDiff::new(Vec::new(), vec![CfgAtom::Flag("test".into())]).unwrap(),
|
|
|
|
);
|
|
|
|
let crate_graph = load_cargo_with_overrides("hello-world-metadata.json", cfg_overrides);
|
|
|
|
check_crate_graph(
|
|
|
|
crate_graph,
|
|
|
|
expect![[r#"
|
|
|
|
CrateGraph {
|
|
|
|
arena: {
|
|
|
|
CrateId(
|
|
|
|
0,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
1,
|
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
|
|
|
canonical_name: "hello-world",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
1,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"hello_world",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "hello-world",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
3,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
2022-08-25 14:06:35 -05:00
|
|
|
"0.1.0",
|
2021-10-30 09:17:04 -05:00
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"an_example",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "an-example",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_AUTHORS": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
2022-08-25 14:06:35 -05:00
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
3,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
4,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"it",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "it",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
4,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
5,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
edition: Edition2015,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
2022-08-25 14:06:35 -05:00
|
|
|
"0.2.98",
|
2021-10-30 09:17:04 -05:00
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"libc",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "libc",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"feature=default",
|
|
|
|
"feature=std",
|
2021-08-23 12:18:11 -05:00
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"feature=align",
|
|
|
|
"feature=const-extern-fn",
|
|
|
|
"feature=default",
|
|
|
|
"feature=extra_traits",
|
|
|
|
"feature=rustc-dep-of-std",
|
|
|
|
"feature=std",
|
|
|
|
"feature=use_std",
|
2021-08-23 12:18:11 -05:00
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$.cargo/registry/src/github.com-1ecc6299db9ec823/libc-0.2.98",
|
|
|
|
"CARGO_PKG_VERSION": "0.2.98",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_AUTHORS": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_CRATE_NAME": "libc",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_NAME": "libc",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "98",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_VERSION_MINOR": "2",
|
2021-08-23 12:18:11 -05:00
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
dependencies: [],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
2022-08-25 14:06:35 -05:00
|
|
|
repo: Some(
|
|
|
|
"https://github.com/rust-lang/libc",
|
|
|
|
),
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}"#]],
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn cargo_hello_world_project_model_with_selective_overrides() {
|
|
|
|
let cfg_overrides = {
|
|
|
|
CfgOverrides::Selective(
|
|
|
|
std::iter::once((
|
|
|
|
"libc".to_owned(),
|
|
|
|
CfgDiff::new(Vec::new(), vec![CfgAtom::Flag("test".into())]).unwrap(),
|
|
|
|
))
|
|
|
|
.collect(),
|
|
|
|
)
|
|
|
|
};
|
|
|
|
let crate_graph = load_cargo_with_overrides("hello-world-metadata.json", cfg_overrides);
|
|
|
|
check_crate_graph(
|
|
|
|
crate_graph,
|
|
|
|
expect![[r#"
|
|
|
|
CrateGraph {
|
|
|
|
arena: {
|
|
|
|
CrateId(
|
|
|
|
0,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
1,
|
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
|
|
|
canonical_name: "hello-world",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
1,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"hello_world",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "hello-world",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-23 12:18:11 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
3,
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"an_example",
|
2021-08-23 12:18:11 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "an-example",
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
|
|
|
3,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"it",
|
|
|
|
),
|
|
|
|
canonical_name: "it",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2021-08-23 12:18:11 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-23 12:18:11 -05:00
|
|
|
},
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
4,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
5,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
edition: Edition2015,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
2022-08-25 14:06:35 -05:00
|
|
|
"0.2.98",
|
2021-10-30 09:17:04 -05:00
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"libc",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "libc",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"feature=default",
|
|
|
|
"feature=std",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"feature=align",
|
|
|
|
"feature=const-extern-fn",
|
|
|
|
"feature=default",
|
|
|
|
"feature=extra_traits",
|
|
|
|
"feature=rustc-dep-of-std",
|
|
|
|
"feature=std",
|
|
|
|
"feature=use_std",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$.cargo/registry/src/github.com-1ecc6299db9ec823/libc-0.2.98",
|
|
|
|
"CARGO_PKG_VERSION": "0.2.98",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_AUTHORS": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_CRATE_NAME": "libc",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_NAME": "libc",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "98",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_VERSION_MINOR": "2",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
dependencies: [],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
2022-08-25 14:06:35 -05:00
|
|
|
repo: Some(
|
|
|
|
"https://github.com/rust-lang/libc",
|
|
|
|
),
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
},
|
|
|
|
}"#]],
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn cargo_hello_world_project_model() {
|
|
|
|
let crate_graph = load_cargo("hello-world-metadata.json");
|
|
|
|
check_crate_graph(
|
|
|
|
crate_graph,
|
|
|
|
expect![[r#"
|
|
|
|
CrateGraph {
|
|
|
|
arena: {
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
0,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
1,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"hello_world",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "hello-world",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
1,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
2022-08-25 14:06:35 -05:00
|
|
|
"0.1.0",
|
2021-10-30 09:17:04 -05:00
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"hello_world",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "hello-world",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"test",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
2022-08-25 14:06:35 -05:00
|
|
|
"test",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_AUTHORS": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
2022-08-25 14:06:35 -05:00
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
2022-08-25 14:06:35 -05:00
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
3,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"an_example",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "an-example",
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
|
|
|
3,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: Some(
|
|
|
|
"0.1.0",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"it",
|
|
|
|
),
|
|
|
|
canonical_name: "it",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"test",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$hello-world",
|
|
|
|
"CARGO_PKG_VERSION": "0.1.0",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "hello_world",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "hello-world",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "0",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "1",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
4,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-06-28 03:41:10 -05:00
|
|
|
"crate has not (yet) been built",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello-world",
|
|
|
|
),
|
2021-11-22 11:44:46 -06:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
2022-08-25 14:06:35 -05:00
|
|
|
CrateId(
|
|
|
|
4,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
5,
|
|
|
|
),
|
|
|
|
edition: Edition2015,
|
|
|
|
version: Some(
|
|
|
|
"0.2.98",
|
|
|
|
),
|
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"libc",
|
|
|
|
),
|
|
|
|
canonical_name: "libc",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"feature=default",
|
|
|
|
"feature=std",
|
|
|
|
],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[
|
|
|
|
"debug_assertions",
|
|
|
|
"feature=align",
|
|
|
|
"feature=const-extern-fn",
|
|
|
|
"feature=default",
|
|
|
|
"feature=extra_traits",
|
|
|
|
"feature=rustc-dep-of-std",
|
|
|
|
"feature=std",
|
|
|
|
"feature=use_std",
|
|
|
|
],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"target_data_layout not loaded",
|
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {
|
|
|
|
"CARGO_PKG_LICENSE": "",
|
|
|
|
"CARGO_PKG_VERSION_MAJOR": "0",
|
|
|
|
"CARGO_MANIFEST_DIR": "$ROOT$.cargo/registry/src/github.com-1ecc6299db9ec823/libc-0.2.98",
|
|
|
|
"CARGO_PKG_VERSION": "0.2.98",
|
|
|
|
"CARGO_PKG_AUTHORS": "",
|
|
|
|
"CARGO_CRATE_NAME": "libc",
|
|
|
|
"CARGO_PKG_LICENSE_FILE": "",
|
|
|
|
"CARGO_PKG_HOMEPAGE": "",
|
|
|
|
"CARGO_PKG_DESCRIPTION": "",
|
|
|
|
"CARGO_PKG_NAME": "libc",
|
|
|
|
"CARGO_PKG_VERSION_PATCH": "98",
|
|
|
|
"CARGO": "cargo",
|
|
|
|
"CARGO_PKG_REPOSITORY": "",
|
|
|
|
"CARGO_PKG_VERSION_MINOR": "2",
|
|
|
|
"CARGO_PKG_VERSION_PRE": "",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
|
|
|
proc_macro: Err(
|
|
|
|
"crate has not (yet) been built",
|
|
|
|
),
|
|
|
|
origin: CratesIo {
|
|
|
|
repo: Some(
|
|
|
|
"https://github.com/rust-lang/libc",
|
|
|
|
),
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"libc",
|
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
},
|
|
|
|
is_proc_macro: false,
|
|
|
|
},
|
internal: add simple smoke test for project model
Our project model code is rather complicated -- the logic for lowering
from `cargo metadata` to `CrateGraph` is fiddly and special-case. So
far, we survived without testing this at all, but this increasingly
seems like a poor option.
So this PR introduces a simple tests just to detect the most obvious
failures. The idea here is that, although we rely on external processes
(cargo & rustc), we are actually using their stable interfaces, so we
might just mock out the outputs.
Long term, I would like to try to virtualize IO here, so as to do such
mocking in a more principled way, but lets start simple.
Should we forgo the mocking and just call `cargo metadata` directly
perhaps? Touch question -- I personally feel that fast, in-process tests
are more important in this case than any extra assurance we get from
running the real thing.
Super-long term, we would probably want to extend our heavy tests to
cover more use-cases, but we should figure a way to do that without
slowing the tests down for everyone.
Perhaps we need two-tiered bors system, where we pull from `master` into
`release` branch only when an additional set of tests passes?
2021-07-20 07:38:20 -05:00
|
|
|
},
|
|
|
|
}"#]],
|
|
|
|
)
|
|
|
|
}
|
2021-08-01 00:47:21 -05:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn rust_project_hello_world_project_model() {
|
|
|
|
let crate_graph = load_rust_project("hello-world-project.json");
|
|
|
|
check_crate_graph(
|
|
|
|
crate_graph,
|
|
|
|
expect![[r#"
|
|
|
|
CrateGraph {
|
|
|
|
arena: {
|
|
|
|
CrateId(
|
|
|
|
0,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
1,
|
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"alloc",
|
|
|
|
),
|
|
|
|
canonical_name: "alloc",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
1,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"core",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
|
|
|
Alloc,
|
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
1,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"core",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "core",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
|
|
|
),
|
|
|
|
origin: Lang(
|
|
|
|
Core,
|
|
|
|
),
|
|
|
|
is_proc_macro: false,
|
|
|
|
},
|
|
|
|
CrateId(
|
|
|
|
2,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
3,
|
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2022-08-25 14:06:35 -05:00
|
|
|
version: None,
|
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"panic_abort",
|
|
|
|
),
|
|
|
|
canonical_name: "panic_abort",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
|
|
|
Other,
|
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
3,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
4,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"panic_unwind",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "panic_unwind",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
|
|
|
Other,
|
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
|
|
|
4,
|
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
|
|
|
5,
|
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
|
|
|
"proc_macro",
|
|
|
|
),
|
|
|
|
canonical_name: "proc_macro",
|
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
6,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"std",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
2022-04-10 05:42:16 -05:00
|
|
|
Other,
|
2022-03-26 15:22:35 -05:00
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
5,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
6,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"profiler_builtins",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "profiler_builtins",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
2022-08-25 14:06:35 -05:00
|
|
|
Other,
|
2022-03-26 15:22:35 -05:00
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
6,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
7,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"std",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "std",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [
|
2022-08-25 14:06:35 -05:00
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
0,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"alloc",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
2021-08-01 00:47:21 -05:00
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
3,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-11-07 04:45:52 -06:00
|
|
|
"panic_unwind",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
2,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"panic_abort",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
1,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-11-07 04:45:52 -06:00
|
|
|
"core",
|
2022-08-25 14:06:35 -05:00
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
5,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"profiler_builtins",
|
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
9,
|
2022-08-25 14:06:35 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-11-07 04:45:52 -06:00
|
|
|
"unwind",
|
2022-08-25 14:06:35 -05:00
|
|
|
),
|
|
|
|
prelude: true,
|
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
7,
|
2022-08-25 14:06:35 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-11-07 04:45:52 -06:00
|
|
|
"std_detect",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
2021-09-28 14:39:41 -05:00
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
8,
|
2021-09-28 14:39:41 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"test",
|
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
prelude: true,
|
|
|
|
},
|
2021-08-01 00:47:21 -05:00
|
|
|
],
|
2022-06-24 06:03:13 -05:00
|
|
|
proc_macro: Err(
|
2022-08-25 14:06:35 -05:00
|
|
|
"no proc macro loaded for sysroot crate",
|
|
|
|
),
|
|
|
|
origin: Lang(
|
|
|
|
Std,
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
7,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
8,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"std_detect",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "std_detect",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
|
|
|
Other,
|
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
8,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-08-25 14:06:35 -05:00
|
|
|
9,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"test",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "test",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
2022-08-25 14:06:35 -05:00
|
|
|
Test,
|
2022-03-26 15:22:35 -05:00
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
9,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-11-07 04:45:52 -06:00
|
|
|
10,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2023-01-31 04:39:25 -06:00
|
|
|
edition: Edition2021,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"unwind",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "unwind",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
|
|
|
"no proc macro loaded for sysroot crate",
|
2022-06-15 11:07:37 -05:00
|
|
|
),
|
2022-03-26 15:22:35 -05:00
|
|
|
origin: Lang(
|
2022-08-25 14:06:35 -05:00
|
|
|
Other,
|
2022-03-26 15:22:35 -05:00
|
|
|
),
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
10,
|
2021-08-01 00:47:21 -05:00
|
|
|
): CrateData {
|
|
|
|
root_file_id: FileId(
|
2022-11-07 04:45:52 -06:00
|
|
|
11,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
edition: Edition2018,
|
2021-10-30 09:17:04 -05:00
|
|
|
version: None,
|
2021-08-01 00:47:21 -05:00
|
|
|
display_name: Some(
|
|
|
|
CrateDisplayName {
|
|
|
|
crate_name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"hello_world",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
canonical_name: "hello_world",
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
),
|
|
|
|
cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
|
|
|
potential_cfg_options: CfgOptions(
|
|
|
|
[],
|
|
|
|
),
|
2023-01-19 12:21:44 -06:00
|
|
|
target_layout: Err(
|
|
|
|
"rust-project.json projects have no target layout set",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
env: Env {
|
|
|
|
entries: {},
|
|
|
|
},
|
|
|
|
dependencies: [
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
|
|
|
1,
|
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"core",
|
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
0,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"alloc",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-08-25 14:06:35 -05:00
|
|
|
6,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
2022-08-25 14:06:35 -05:00
|
|
|
"std",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2021-09-28 14:23:46 -05:00
|
|
|
prelude: true,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
Dependency {
|
|
|
|
crate_id: CrateId(
|
2022-11-07 04:45:52 -06:00
|
|
|
8,
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
|
|
|
name: CrateName(
|
|
|
|
"test",
|
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
prelude: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
],
|
2022-06-28 03:41:10 -05:00
|
|
|
proc_macro: Err(
|
2022-08-25 14:06:35 -05:00
|
|
|
"no proc macro dylib present",
|
2021-08-01 00:47:21 -05:00
|
|
|
),
|
2022-08-25 14:06:35 -05:00
|
|
|
origin: CratesIo {
|
|
|
|
repo: None,
|
2022-09-26 12:09:46 -05:00
|
|
|
name: Some(
|
|
|
|
"hello_world",
|
|
|
|
),
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
2022-03-09 15:34:42 -06:00
|
|
|
is_proc_macro: false,
|
2021-08-01 00:47:21 -05:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}"#]],
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn rust_project_is_proc_macro_has_proc_macro_dep() {
|
|
|
|
let crate_graph = load_rust_project("is-proc-macro-project.json");
|
|
|
|
// Since the project only defines one crate (outside the sysroot crates),
|
|
|
|
// it should be the one with the biggest Id.
|
|
|
|
let crate_id = crate_graph.iter().max().unwrap();
|
|
|
|
let crate_data = &crate_graph[crate_id];
|
|
|
|
// Assert that the project crate with `is_proc_macro` has a dependency
|
|
|
|
// on the proc_macro sysroot crate.
|
|
|
|
crate_data.dependencies.iter().find(|&dep| dep.name.deref() == "proc_macro").unwrap();
|
|
|
|
}
|