Unnamed repository; edit this file 'description' to name the repository.
Merge pull request #19712 from Veykril/push-yzsxpmkytkzt
refactor: Clean up cache priming cancellation handling
Lukas Wirth 2025-04-29
parent fe7b4f2 · parent 7d9b839 · commit 26c7960
-rw-r--r--  crates/hir/src/symbols.rs                        | 17
-rw-r--r--  crates/ide-db/src/lib.rs                         | 10
-rw-r--r--  crates/ide-db/src/prime_caches.rs                | 51
-rw-r--r--  crates/ide/src/lib.rs                            |  4
-rw-r--r--  crates/rust-analyzer/src/cli/analysis_stats.rs   |  3
-rw-r--r--  crates/rust-analyzer/src/discover.rs             |  6
-rw-r--r--  crates/stdx/src/thread.rs                        |  2
7 files changed, 54 insertions, 39 deletions
diff --git a/crates/hir/src/symbols.rs b/crates/hir/src/symbols.rs
index 3d944afb8b..e87ab87407 100644
--- a/crates/hir/src/symbols.rs
+++ b/crates/hir/src/symbols.rs
@@ -13,13 +13,13 @@ use hir_def::{
use hir_expand::{HirFileId, name::Name};
use hir_ty::{
db::HirDatabase,
- display::{DisplayTarget, HirDisplay, hir_display_with_store},
+ display::{HirDisplay, hir_display_with_store},
};
use intern::Symbol;
use rustc_hash::FxHashMap;
use syntax::{AstNode, AstPtr, SmolStr, SyntaxNode, SyntaxNodePtr, ToSmolStr, ast::HasName};
-use crate::{Module, ModuleDef, Semantics};
+use crate::{HasCrate, Module, ModuleDef, Semantics};
pub type FxIndexSet<T> = indexmap::IndexSet<T, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;
@@ -66,7 +66,6 @@ pub struct SymbolCollector<'a> {
symbols: FxIndexSet<FileSymbol>,
work: Vec<SymbolCollectorWork>,
current_container_name: Option<SmolStr>,
- display_target: DisplayTarget,
}
/// Given a [`ModuleId`] and a [`HirDatabase`], use the DefMap for the module's crate to collect
@@ -78,10 +77,6 @@ impl<'a> SymbolCollector<'a> {
symbols: Default::default(),
work: Default::default(),
current_container_name: None,
- display_target: DisplayTarget::from_crate(
- db,
- *db.all_crates().last().expect("no crate graph present"),
- ),
}
}
@@ -93,8 +88,7 @@ impl<'a> SymbolCollector<'a> {
pub fn collect(&mut self, module: Module) {
let _p = tracing::info_span!("SymbolCollector::collect", ?module).entered();
- tracing::info!(?module, "SymbolCollector::collect",);
- self.display_target = module.krate().to_display_target(self.db);
+ tracing::info!(?module, "SymbolCollector::collect");
// The initial work is the root module we're collecting, additional work will
// be populated as we traverse the module's definitions.
@@ -321,7 +315,10 @@ impl<'a> SymbolCollector<'a> {
let impl_data = self.db.impl_signature(impl_id);
let impl_name = Some(
hir_display_with_store(impl_data.self_ty, &impl_data.store)
- .display(self.db, self.display_target)
+ .display(
+ self.db,
+ crate::Impl::from(impl_id).krate(self.db).to_display_target(self.db),
+ )
.to_smolstr(),
);
self.with_container_name(impl_name, |s| {
diff --git a/crates/ide-db/src/lib.rs b/crates/ide-db/src/lib.rs
index d3934e14ab..a433f184e7 100644
--- a/crates/ide-db/src/lib.rs
+++ b/crates/ide-db/src/lib.rs
@@ -78,6 +78,8 @@ pub type FileRange = FileRangeWrapper<FileId>;
#[salsa::db]
pub struct RootDatabase {
+ // FIXME: Revisit this commit now that we migrated to the new salsa, given we store arcs in this
+ // db directly now
// We use `ManuallyDrop` here because every codegen unit that contains a
// `&RootDatabase -> &dyn OtherDatabase` cast will instantiate its drop glue in the vtable,
// which duplicates `Weak::drop` and `Arc::drop` tens of thousands of times, which makes
@@ -234,14 +236,6 @@ impl RootDatabase {
// );
// hir::db::BodyWithSourceMapQuery.in_db_mut(self).set_lru_capacity(2048);
}
-
- pub fn snapshot(&self) -> Self {
- Self {
- storage: self.storage.clone(),
- files: self.files.clone(),
- crates_map: self.crates_map.clone(),
- }
- }
}
#[query_group::query_group]
diff --git a/crates/ide-db/src/prime_caches.rs b/crates/ide-db/src/prime_caches.rs
index 17c3f75ce1..5e8d016bfb 100644
--- a/crates/ide-db/src/prime_caches.rs
+++ b/crates/ide-db/src/prime_caches.rs
@@ -51,6 +51,7 @@ pub fn parallel_prime_caches(
enum ParallelPrimeCacheWorkerProgress {
BeginCrate { crate_id: Crate, crate_name: Symbol },
EndCrate { crate_id: Crate },
+ Cancelled(Cancelled),
}
// We split off def map computation from other work,
@@ -71,26 +72,32 @@ pub fn parallel_prime_caches(
progress_sender
.send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;
- match kind {
+ let cancelled = Cancelled::catch(|| match kind {
PrimingPhase::DefMap => _ = db.crate_def_map(crate_id),
PrimingPhase::ImportMap => _ = db.import_map(crate_id),
PrimingPhase::CrateSymbols => _ = db.crate_symbols(crate_id.into()),
- }
+ });
- progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
+ match cancelled {
+ Ok(()) => progress_sender
+ .send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?,
+ Err(cancelled) => progress_sender
+ .send(ParallelPrimeCacheWorkerProgress::Cancelled(cancelled))?,
+ }
}
Ok::<_, crossbeam_channel::SendError<_>>(())
};
for id in 0..num_worker_threads {
- let worker = prime_caches_worker.clone();
- let db = db.snapshot();
-
stdx::thread::Builder::new(stdx::thread::ThreadIntent::Worker)
.allow_leak(true)
.name(format!("PrimeCaches#{id}"))
- .spawn(move || Cancelled::catch(|| worker(db.snapshot())))
+ .spawn({
+ let worker = prime_caches_worker.clone();
+ let db = db.clone();
+ move || worker(db)
+ })
.expect("failed to spawn thread");
}
@@ -142,9 +149,14 @@ pub fn parallel_prime_caches(
continue;
}
Err(crossbeam_channel::RecvTimeoutError::Disconnected) => {
- // our workers may have died from a cancelled task, so we'll check and re-raise here.
- db.unwind_if_revision_cancelled();
- break;
+ // all our workers have exited, mark us as finished and exit
+ cb(ParallelPrimeCachesProgress {
+ crates_currently_indexing: vec![],
+ crates_done,
+ crates_total: crates_done,
+ work_type: "Indexing",
+ });
+ return;
}
};
match worker_progress {
@@ -156,6 +168,10 @@ pub fn parallel_prime_caches(
crates_to_prime.mark_done(crate_id);
crates_done += 1;
}
+ ParallelPrimeCacheWorkerProgress::Cancelled(cancelled) => {
+ // Cancelled::throw should probably be public
+ std::panic::resume_unwind(Box::new(cancelled));
+ }
};
let progress = ParallelPrimeCachesProgress {
@@ -186,9 +202,14 @@ pub fn parallel_prime_caches(
continue;
}
Err(crossbeam_channel::RecvTimeoutError::Disconnected) => {
- // our workers may have died from a cancelled task, so we'll check and re-raise here.
- db.unwind_if_revision_cancelled();
- break;
+ // all our workers have exited, mark us as finished and exit
+ cb(ParallelPrimeCachesProgress {
+ crates_currently_indexing: vec![],
+ crates_done,
+ crates_total: crates_done,
+ work_type: "Populating symbols",
+ });
+ return;
}
};
match worker_progress {
@@ -199,6 +220,10 @@ pub fn parallel_prime_caches(
crates_currently_indexing.swap_remove(&crate_id);
crates_done += 1;
}
+ ParallelPrimeCacheWorkerProgress::Cancelled(cancelled) => {
+ // Cancelled::throw should probably be public
+ std::panic::resume_unwind(Box::new(cancelled));
+ }
};
let progress = ParallelPrimeCachesProgress {
diff --git a/crates/ide/src/lib.rs b/crates/ide/src/lib.rs
index e7f5fcbf69..aa525a8612 100644
--- a/crates/ide/src/lib.rs
+++ b/crates/ide/src/lib.rs
@@ -182,7 +182,7 @@ impl AnalysisHost {
/// Returns a snapshot of the current state, which you can query for
/// semantic information.
pub fn analysis(&self) -> Analysis {
- Analysis { db: self.db.snapshot() }
+ Analysis { db: self.db.clone() }
}
/// Applies changes to the current state of the world. If there are
@@ -864,7 +864,7 @@ impl Analysis {
where
F: FnOnce(&RootDatabase) -> T + std::panic::UnwindSafe,
{
- let snap = self.db.snapshot();
+ let snap = self.db.clone();
Cancelled::catch(|| f(&snap))
}
}
diff --git a/crates/rust-analyzer/src/cli/analysis_stats.rs b/crates/rust-analyzer/src/cli/analysis_stats.rs
index c50df4b6d4..a1e4adf084 100644
--- a/crates/rust-analyzer/src/cli/analysis_stats.rs
+++ b/crates/rust-analyzer/src/cli/analysis_stats.rs
@@ -701,10 +701,9 @@ impl flags::AnalysisStats {
if self.parallel {
let mut inference_sw = self.stop_watch();
- let snap = db.snapshot();
bodies
.par_iter()
- .map_with(snap, |snap, &body| {
+ .map_with(db.clone(), |snap, &body| {
snap.body(body.into());
snap.infer(body.into());
})
diff --git a/crates/rust-analyzer/src/discover.rs b/crates/rust-analyzer/src/discover.rs
index 67ddc41f3b..24c433610f 100644
--- a/crates/rust-analyzer/src/discover.rs
+++ b/crates/rust-analyzer/src/discover.rs
@@ -126,10 +126,8 @@ impl CargoParser<DiscoverProjectMessage> for DiscoverProjectParser {
Some(msg)
}
Err(err) => {
- let err = DiscoverProjectData::Error {
- error: format!("{:#?}\n{}", err, line),
- source: None,
- };
+ let err =
+ DiscoverProjectData::Error { error: format!("{err:#?}\n{line}"), source: None };
Some(DiscoverProjectMessage::new(err))
}
}
diff --git a/crates/stdx/src/thread.rs b/crates/stdx/src/thread.rs
index 6c742fecf1..3d14538312 100644
--- a/crates/stdx/src/thread.rs
+++ b/crates/stdx/src/thread.rs
@@ -56,6 +56,8 @@ impl Builder {
Self { inner: self.inner.stack_size(size), ..self }
}
+ /// Whether dropping should detach the thread
+ /// instead of joining it.
#[must_use]
pub fn allow_leak(self, allow_leak: bool) -> Self {
Self { allow_leak, ..self }