Commit ae509e8

Add a can_fail flag to Unit that allows compilation to proceed even if specific units fail. All Docscrape units now have can_fail = true, so Rustdoc is not stopped when an example fails to scrape.
1 parent d583b21 commit ae509e8
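
For orientation, the bookkeeping this commit introduces boils down to a map of unit outcomes shared across jobs. The following is a minimal standalone sketch, not cargo's actual API; the Meta alias and the worker threads are invented for illustration:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-in for cargo's `Metadata` unit identifier.
type Meta = u64;

fn main() {
    // Shared map of unit outcomes, analogous in spirit to `Context::completed_units`.
    let completed_units: Arc<Mutex<HashMap<Meta, bool>>> =
        Arc::new(Mutex::new(HashMap::new()));

    // Pretend two fallible "scrape" jobs run concurrently; one of them fails.
    let handles: Vec<_> = [(1, true), (2, false)]
        .into_iter()
        .map(|(meta, success)| {
            let completed_units = Arc::clone(&completed_units);
            thread::spawn(move || {
                // Each job records its own outcome instead of aborting the build.
                completed_units.lock().unwrap().insert(meta, success);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // A later unit (for example the Rustdoc job) can now see which deps succeeded.
    let completed = completed_units.lock().unwrap();
    assert!(completed[&1]);
    assert!(!completed[&2]);
}
```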

11 files changed: +205 additions, -16 deletions

src/cargo/core/compiler/context/mod.rs

Lines changed: 8 additions & 0 deletions

@@ -77,6 +77,13 @@ pub struct Context<'a, 'cfg> {
     /// Map of Doc/Docscrape units to metadata for their -Cmetadata flag.
     /// See Context::find_metadata_units for more details.
     pub metadata_for_doc_units: HashMap<Unit, Metadata>,
+
+    /// Map that tracks whether a unit completed successfully. Used in conjunction
+    /// with the `Unit::can_fail` flag, so jobs can dynamically track at runtime
+    /// whether their dependencies succeeded or failed. Currently used for
+    /// the Rustdoc scrape-examples feature to allow Rustdoc to proceed even if
+    /// examples fail to compile.
+    pub completed_units: Arc<Mutex<HashMap<Metadata, bool>>>,
 }

 impl<'a, 'cfg> Context<'a, 'cfg> {
@@ -115,6 +122,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
             rustc_clients: HashMap::new(),
             lto: HashMap::new(),
             metadata_for_doc_units: HashMap::new(),
+            completed_units: Arc::new(Mutex::new(HashMap::new())),
         })
     }

src/cargo/core/compiler/job_queue.rs

Lines changed: 45 additions & 1 deletion

@@ -126,6 +126,10 @@ struct DrainState<'cfg> {
     total_units: usize,

     queue: DependencyQueue<Unit, Artifact, Job>,
+    /// Dependency map that is like JobQueue::dep_map, except with Job information removed.
+    /// Used to determine if a unit's dependencies have failed, see
+    /// [`DrainState::spawn_work_if_possible`].
+    dep_map: HashMap<Unit, HashSet<(Unit, Artifact)>>,
     messages: Arc<Queue<Message>>,
     /// Diagnostic deduplication support.
     diag_dedupe: DiagDedupe<'cfg>,
@@ -506,8 +510,15 @@ impl<'cfg> JobQueue<'cfg> {
         self.queue.queue_finished();

         let progress = Progress::with_style("Building", ProgressStyle::Ratio, cx.bcx.config);
+        let dep_map = self
+            .queue
+            .dep_map()
+            .iter()
+            .map(|(unit, (deps, _))| (unit.clone(), deps.clone()))
+            .collect();
         let state = DrainState {
             total_units: self.queue.len(),
+            dep_map,
             queue: self.queue,
             // 100 here is somewhat arbitrary. It is a few screenfulls of
             // output, and hopefully at most a few megabytes of memory for
@@ -578,6 +589,32 @@ impl<'cfg> DrainState<'cfg> {
         // start requesting job tokens. Each job after the first needs to
         // request a token.
         while let Some((unit, job)) = self.queue.dequeue() {
+            // First, we handle the special case of fallible units. If
+            // this unit is allowed to fail, and any one of its dependencies
+            // has failed, then we should immediately mark it as failed and
+            // skip executing it.
+            if unit.can_fail {
+                let mut completed_units = cx.completed_units.lock().unwrap();
+                let failed_deps = self.dep_map[&unit]
+                    .iter()
+                    .filter(|(dep_unit, _)| {
+                        let dep_meta = cx.files().metadata(dep_unit);
+                        !completed_units[&dep_meta]
+                    })
+                    .map(|(_, artifact)| artifact)
+                    .collect::<HashSet<_>>();
+                if !failed_deps.is_empty() {
+                    // TODO: should put a warning here saying which units were skipped
+                    // due to failed dependencies.
+                    for artifact in failed_deps {
+                        self.queue.finish(&unit, artifact);
+                    }
+                    let unit_meta = cx.files().metadata(&unit);
+                    completed_units.insert(unit_meta, false);
+                    continue;
+                }
+            }
+
             self.pending_queue.push((unit, job));
             if self.active.len() + self.pending_queue.len() > 1 {
                 jobserver_helper.request_token();
@@ -713,7 +750,8 @@
         };
         debug!("end ({:?}): {:?}", unit, result);
         match result {
-            Ok(()) => self.finish(id, &unit, artifact, cx)?,
+            Ok(()) => self.finish(id, &unit, artifact, cx, true)?,
+            Err(_) if unit.can_fail => self.finish(id, &unit, artifact, cx, false)?,
             Err(error) => {
                 let msg = "The following warnings were emitted during compilation:";
                 self.emit_warnings(Some(msg), &unit, cx)?;
@@ -1161,6 +1199,7 @@
         unit: &Unit,
         artifact: Artifact,
         cx: &mut Context<'_, '_>,
+        success: bool,
     ) -> CargoResult<()> {
         if unit.mode.is_run_custom_build() && unit.show_warnings(cx.bcx.config) {
             self.emit_warnings(None, unit, cx)?;
@@ -1170,6 +1209,11 @@
             Artifact::All => self.timings.unit_finished(id, unlocked),
             Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked),
         }
+        cx.completed_units
+            .lock()
+            .unwrap()
+            .insert(cx.files().metadata(unit), success);
+
         Ok(())
     }
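
To make the scheduling change above easier to follow, here is a compressed, hypothetical sketch of the early-skip check. The should_skip helper, the string unit IDs, and the plain maps are stand-ins invented for illustration; cargo's real code works on Unit and Metadata values inside DrainState::spawn_work_if_possible.

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical stand-in for cargo's `Unit`/`Metadata` identifiers.
type UnitId = &'static str;

// Returns true when a fallible unit should be skipped because at least one of
// its dependencies is recorded as failed in the completed-units map.
fn should_skip(
    unit: UnitId,
    can_fail: bool,
    dep_map: &HashMap<UnitId, HashSet<UnitId>>,
    completed_units: &HashMap<UnitId, bool>,
) -> bool {
    can_fail
        && dep_map[unit]
            .iter()
            .any(|dep| !completed_units.get(dep).copied().unwrap_or(true))
}

fn main() {
    // A fallible Docscrape-style unit depends on two example builds; one failed.
    let dep_map = HashMap::from([("scrape", HashSet::from(["ex-a", "ex-b"]))]);
    let completed_units = HashMap::from([("ex-a", true), ("ex-b", false)]);
    assert!(should_skip("scrape", true, &dep_map, &completed_units));
    // Without can_fail, the early-skip path is never taken (cargo would
    // instead surface the dependency error as usual).
    assert!(!should_skip("scrape", false, &dep_map, &completed_units));
}
```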

src/cargo/core/compiler/mod.rs

Lines changed: 30 additions & 14 deletions

@@ -22,7 +22,7 @@ mod unit;
 pub mod unit_dependencies;
 pub mod unit_graph;

-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use std::env;
 use std::ffi::{OsStr, OsString};
 use std::fs::{self, File};
@@ -639,9 +639,9 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
     let metadata = cx.metadata_for_doc_units[unit];
     rustdoc.arg("-C").arg(format!("metadata={}", metadata));

-    let scrape_output_path = |unit: &Unit| -> CargoResult<PathBuf> {
+    let scrape_output_path = |unit: &Unit| -> PathBuf {
         let output_dir = cx.files().deps_dir(unit);
-        Ok(output_dir.join(format!("{}.examples", unit.buildkey())))
+        output_dir.join(format!("{}.examples", unit.buildkey()))
     };

     if unit.mode.is_doc_scrape() {
@@ -651,7 +651,7 @@

         rustdoc
             .arg("--scrape-examples-output-path")
-            .arg(scrape_output_path(unit)?);
+            .arg(scrape_output_path(unit));

         // Only scrape example for items from crates in the workspace, to reduce generated file size
         for pkg in cx.bcx.ws.members() {
@@ -664,18 +664,18 @@
                 rustdoc.arg("--scrape-examples-target-crate").arg(name);
             }
         }
-    } else if cx.bcx.scrape_units.len() > 0 && cx.bcx.ws.unit_needs_doc_scrape(unit) {
-        // We only pass scraped examples to packages in the workspace
-        // since examples are only coming from reverse-dependencies of workspace packages
+    }

+    let should_include_scrape_units =
+        cx.bcx.scrape_units.len() > 0 && cx.bcx.ws.unit_needs_doc_scrape(unit);
+    let scrape_outputs = should_include_scrape_units.then(|| {
         rustdoc.arg("-Zunstable-options");
-
-        for scrape_unit in &cx.bcx.scrape_units {
-            rustdoc
-                .arg("--with-examples")
-                .arg(scrape_output_path(scrape_unit)?);
-        }
-    }
+        cx.bcx
+            .scrape_units
+            .iter()
+            .map(|unit| (cx.files().metadata(unit), scrape_output_path(unit)))
+            .collect::<HashMap<_, _>>()
+    });

     build_deps_args(&mut rustdoc, cx, unit)?;
     rustdoc::add_root_urls(cx, unit, &mut rustdoc)?;
@@ -693,19 +693,35 @@
     let target = Target::clone(&unit.target);
     let mut output_options = OutputOptions::new(cx, unit);
     let script_metadata = cx.find_build_script_metadata(unit);
+    let completed_units = Arc::clone(&cx.completed_units);
     Ok(Work::new(move |state| {
         add_custom_flags(
             &mut rustdoc,
             &build_script_outputs.lock().unwrap(),
             script_metadata,
         )?;
+
+        // Add the output of scraped examples to the rustdoc command.
+        // This action must happen after the unit's dependencies have finished,
+        // because some of those deps may be Docscrape units which have failed.
+        // So we dynamically determine which `--with-examples` flags to pass here.
+        if let Some(scrape_outputs) = scrape_outputs {
+            let completed_units = completed_units.lock().unwrap();
+            for (metadata, output_path) in &scrape_outputs {
+                if completed_units[metadata] {
+                    rustdoc.arg("--with-examples").arg(output_path);
+                }
+            }
+        }
+
         let crate_dir = doc_dir.join(&crate_name);
         if crate_dir.exists() {
             // Remove output from a previous build. This ensures that stale
             // files for removed items are removed.
             debug!("removing pre-existing doc directory {:?}", crate_dir);
             paths::remove_dir_all(crate_dir)?;
         }
+
         state.running(&rustdoc);

         rustdoc
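
The rustdoc side of the feature can be summarized by the following hypothetical sketch (with_examples_args, the Meta alias, and the path strings are illustrative only): once the scrape units have run, only the outputs recorded as successful become --with-examples arguments.

```rust
use std::collections::HashMap;

// Hypothetical stand-in for cargo's `Metadata` unit identifier.
type Meta = u64;

// Given the planned scrape outputs and the recorded outcomes, keep only the
// example files produced by scrape units that actually succeeded.
fn with_examples_args(
    scrape_outputs: &HashMap<Meta, String>,
    completed_units: &HashMap<Meta, bool>,
) -> Vec<String> {
    let mut args = Vec::new();
    for (meta, output_path) in scrape_outputs {
        if completed_units.get(meta).copied().unwrap_or(false) {
            args.push("--with-examples".to_string());
            args.push(output_path.clone());
        }
    }
    args
}

fn main() {
    let scrape_outputs = HashMap::from([
        (1, "deps/a.examples".to_string()),
        (2, "deps/b.examples".to_string()),
    ]);
    // Unit 2's scrape step failed, so its output file is skipped.
    let completed_units = HashMap::from([(1, true), (2, false)]);
    let args = with_examples_args(&scrape_outputs, &completed_units);
    assert_eq!(
        args,
        vec!["--with-examples".to_string(), "deps/a.examples".to_string()]
    );
}
```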

src/cargo/core/compiler/standard_lib.rs

Lines changed: 1 addition & 0 deletions

@@ -217,6 +217,7 @@ pub fn generate_std_roots(
                 /*is_std*/ true,
                 /*dep_hash*/ 0,
                 IsArtifact::No,
+                false,
             ));
         }
     }

src/cargo/core/compiler/unit.rs

Lines changed: 4 additions & 0 deletions

@@ -72,6 +72,7 @@ pub struct UnitInner {
     /// This value initially starts as 0, and then is filled in via a
     /// second-pass after all the unit dependencies have been computed.
     pub dep_hash: u64,
+    pub can_fail: bool,
 }

 impl UnitInner {
@@ -141,6 +142,7 @@ impl fmt::Debug for Unit {
             .field("artifact", &self.artifact.is_true())
             .field("is_std", &self.is_std)
             .field("dep_hash", &self.dep_hash)
+            .field("can_fail", &self.can_fail)
             .finish()
     }
 }
@@ -184,6 +186,7 @@ impl UnitInterner {
         is_std: bool,
         dep_hash: u64,
         artifact: IsArtifact,
+        can_fail: bool,
     ) -> Unit {
         let target = match (is_std, target.kind()) {
             // This is a horrible hack to support build-std. `libstd` declares
@@ -216,6 +219,7 @@
             is_std,
             dep_hash,
             artifact,
+            can_fail,
         });
         Unit { inner }
     }

src/cargo/core/compiler/unit_dependencies.rs

Lines changed: 1 addition & 0 deletions

@@ -888,6 +888,7 @@ fn new_unit_dep_with_profile(
         state.is_std,
         /*dep_hash*/ 0,
         artifact.map_or(IsArtifact::No, |_| IsArtifact::Yes),
+        parent.can_fail,
     );
     Ok(UnitDep {
         unit,

src/cargo/core/features.rs

Lines changed: 2 additions & 0 deletions

@@ -663,6 +663,7 @@ unstable_cli_options!(
     // TODO(wcrichto): move scrape example configuration into Cargo.toml before stabilization
     // See: https://github.com/rust-lang/cargo/pull/9525#discussion_r728470927
     rustdoc_scrape_examples: Option<String> = ("Allow rustdoc to scrape examples from reverse-dependencies for documentation"),
+    ignore_scrape_failures: bool = ("When scraping examples for Rustdoc, don't stop compilation if an example fails"),
     skip_rustdoc_fingerprint: bool = (HIDDEN),
 );

@@ -936,6 +937,7 @@
                     )
                 }
             }
+            "ignore-scrape-failures" => self.ignore_scrape_failures = parse_empty(k, v)?,
             "skip-rustdoc-fingerprint" => self.skip_rustdoc_fingerprint = parse_empty(k, v)?,
             "compile-progress" => stabilized_warn(k, "1.30", STABILIZED_COMPILE_PROGRESS),
             "offline" => stabilized_err(k, "1.36", STABILIZED_OFFLINE)?,

src/cargo/core/workspace.rs

Lines changed: 1 addition & 1 deletion

@@ -1517,7 +1517,7 @@
         // (not documented) or proc macros (have no scrape-able exports). Additionally,
         // naively passing a proc macro's unit_for to new_unit_dep will currently cause
         // Cargo to panic, see issue #10545.
-        self.is_member(&unit.pkg) && !unit.target.for_host()
+        self.is_member(&unit.pkg) && !unit.target.for_host() && unit.mode.is_doc()
     }
 }

src/cargo/ops/cargo_compile.rs

Lines changed: 3 additions & 0 deletions

@@ -1069,6 +1069,7 @@ fn generate_targets(
                 /*is_std*/ false,
                 /*dep_hash*/ 0,
                 IsArtifact::No,
+                mode.is_doc_scrape() && ws.config().cli_unstable().ignore_scrape_failures,
             );
             units.insert(unit);
         }
@@ -1631,6 +1632,7 @@ fn traverse_and_share(
         unit.is_std,
         new_dep_hash,
         unit.artifact,
+        unit.can_fail,
     );
     assert!(memo.insert(unit.clone(), new_unit.clone()).is_none());
     new_graph.entry(new_unit.clone()).or_insert(new_deps);
@@ -1872,6 +1874,7 @@ fn override_rustc_crate_types(
             unit.is_std,
             unit.dep_hash,
             unit.artifact,
+            unit.can_fail,
         )
     };
     units[0] = match unit.target.kind() {

src/cargo/util/dependency_queue.rs

Lines changed: 4 additions & 0 deletions

@@ -170,6 +170,10 @@ impl<N: Hash + Eq + Clone, E: Eq + Hash + Clone, V> DependencyQueue<N, E, V> {
         self.dep_map.len()
     }

+    pub fn dep_map(&self) -> &HashMap<N, (HashSet<(N, E)>, V)> {
+        &self.dep_map
+    }
+
     /// Indicate that something has finished.
     ///
     /// Calling this function indicates that the `node` has produced `edge`. All
