Commit baf0768

inline helper methods into with_new_goal
1 parent b1d9cb9 commit baf0768

File tree

1 file changed: +80 -115 lines
  • compiler/rustc_trait_selection/src/solve/search_graph


compiler/rustc_trait_selection/src/solve/search_graph/mod.rs

@@ -165,21 +165,46 @@ impl<'tcx> SearchGraph<'tcx> {
         }
     }
 
-    /// Tries putting the new goal on the stack, returning an error if it is already cached.
+    /// Probably the most involved method of the whole solver.
     ///
-    /// This correctly updates the provisional cache if there is a cycle.
-    #[instrument(level = "debug", skip(self, tcx, inspect), ret)]
-    fn try_push_stack(
+    /// Given some goal which is proven via the `prove_goal` closure, this
+    /// handles caching, overflow, and coinductive cycles.
+    pub(super) fn with_new_goal(
         &mut self,
         tcx: TyCtxt<'tcx>,
         input: CanonicalInput<'tcx>,
-        available_depth: Limit,
         inspect: &mut ProofTreeBuilder<'tcx>,
-    ) -> Result<(), QueryResult<'tcx>> {
-        // Look at the provisional cache to check for cycles.
+        mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
+    ) -> QueryResult<'tcx> {
+        // Check for overflow.
+        let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
+            if let Some(last) = self.stack.raw.last_mut() {
+                last.encountered_overflow = true;
+            }
+            return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
+        };
+
+        // Try to fetch the goal from the global cache.
+        if inspect.use_global_cache() {
+            if let Some(CacheData { result, reached_depth, encountered_overflow }) =
+                self.global_cache(tcx).get(
+                    tcx,
+                    input,
+                    |cycle_participants| {
+                        self.stack.iter().any(|entry| cycle_participants.contains(&entry.input))
+                    },
+                    available_depth,
+                )
+            {
+                self.on_cache_hit(reached_depth, encountered_overflow);
+                return result;
+            }
+        }
+
+        // Look at the provisional cache to detect cycles.
         let cache = &mut self.provisional_cache;
         match cache.lookup_table.entry(input) {
-            // No entry, simply push this goal on the stack.
+            // No entry, we push this goal on the stack and try to prove it.
             Entry::Vacant(v) => {
                 let depth = self.stack.next_index();
                 let entry = StackEntry {
@@ -194,13 +219,12 @@ impl<'tcx> SearchGraph<'tcx> {
                 let response = Self::response_no_constraints(tcx, input, Certainty::Yes);
                 let entry_index = cache.entries.push(ProvisionalEntry { response, depth, input });
                 v.insert(entry_index);
-                Ok(())
             }
             // We have a nested goal which relies on a goal `root` deeper in the stack.
             //
-            // We first store that we may have to rerun `evaluate_goal` for `root` in case the
-            // provisional response is not equal to the final response. We also update the depth
-            // of all goals which recursively depend on our current goal to depend on `root`
+            // We first store that we may have to reprove `root` in case the provisional
+            // response is not equal to the final response. We also update the depth of all
+            // goals which recursively depend on our current goal to depend on `root`
             // instead.
             //
             // Finally we can return either the provisional response for that goal if we have a
@@ -209,7 +233,6 @@ impl<'tcx> SearchGraph<'tcx> {
                 inspect.cache_hit(CacheHit::Provisional);
 
                 let entry_index = *entry_index.get();
-
                 let stack_depth = cache.depth(entry_index);
                 debug!("encountered cycle with depth {stack_depth:?}");
 
@@ -233,112 +256,55 @@ impl<'tcx> SearchGraph<'tcx> {
                     // If we're in a coinductive cycle, we have to retry proving the current goal
                     // until we reach a fixpoint.
                     self.stack[stack_depth].has_been_used = true;
-                    Err(cache.provisional_result(entry_index))
+                    return cache.provisional_result(entry_index);
                 } else {
-                    Err(Self::response_no_constraints(tcx, input, Certainty::OVERFLOW))
+                    return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
                 }
             }
         }
-    }
-
-    /// We cannot simply store the result of [super::EvalCtxt::compute_goal] as we have to deal with
-    /// coinductive cycles.
-    ///
-    /// When we encounter a coinductive cycle, we have to prove the final result of that cycle
-    /// while we are still computing that result. Because of this we continuously recompute the
-    /// cycle until the result of the previous iteration is equal to the final result, at which
-    /// point we are done.
-    ///
-    /// This function returns `true` if we were able to finalize the goal and `false` if it has
-    /// updated the provisional cache and we have to recompute the current goal.
-    ///
-    /// FIXME: Refer to the rustc-dev-guide entry once it exists.
-    #[instrument(level = "debug", skip(self, actual_input), ret)]
-    fn try_finalize_goal(
-        &mut self,
-        actual_input: CanonicalInput<'tcx>,
-        response: QueryResult<'tcx>,
-    ) -> Result<StackEntry<'tcx>, ()> {
-        let stack_entry = self.pop_stack();
-        assert_eq!(stack_entry.input, actual_input);
-
-        let cache = &mut self.provisional_cache;
-        let provisional_entry_index = *cache.lookup_table.get(&stack_entry.input).unwrap();
-        let provisional_entry = &mut cache.entries[provisional_entry_index];
-        // We eagerly update the response in the cache here. If we have to reevaluate
-        // this goal we use the new response when hitting a cycle, and we definitely
-        // want to access the final response whenever we look at the cache.
-        let prev_response = mem::replace(&mut provisional_entry.response, response);
-
-        // Was the current goal the root of a cycle and was the provisional response
-        // different from the final one.
-        if stack_entry.has_been_used && prev_response != response {
-            // If so, remove all entries whose result depends on this goal
-            // from the provisional cache...
-            //
-            // That's not completely correct, as a nested goal can also
-            // depend on a goal which is lower in the stack so it doesn't
-            // actually depend on the current goal. This should be fairly
-            // rare and is hopefully not relevant for performance.
-            #[allow(rustc::potential_query_instability)]
-            cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
-            cache.entries.truncate(provisional_entry_index.index() + 1);
-
-            // ...and finally push our goal back on the stack and reevaluate it.
-            self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
-            Err(())
-        } else {
-            Ok(stack_entry)
-        }
-    }
-
-    pub(super) fn with_new_goal(
-        &mut self,
-        tcx: TyCtxt<'tcx>,
-        input: CanonicalInput<'tcx>,
-        inspect: &mut ProofTreeBuilder<'tcx>,
-        mut loop_body: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
-    ) -> QueryResult<'tcx> {
-        let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
-            if let Some(last) = self.stack.raw.last_mut() {
-                last.encountered_overflow = true;
-            }
-            return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
-        };
-
-        if inspect.use_global_cache() {
-            if let Some(CacheData { result, reached_depth, encountered_overflow }) =
-                self.global_cache(tcx).get(
-                    tcx,
-                    input,
-                    |cycle_participants| {
-                        self.stack.iter().any(|entry| cycle_participants.contains(&entry.input))
-                    },
-                    available_depth,
-                )
-            {
-                self.on_cache_hit(reached_depth, encountered_overflow);
-                return result;
-            }
-        }
-
-        match self.try_push_stack(tcx, input, available_depth, inspect) {
-            Ok(()) => {}
-            // Our goal is already on the stack, eager return.
-            Err(response) => return response,
-        }
 
         // This is for global caching, so we properly track query dependencies.
-        // Everything that affects the `Result` should be performed within this
+        // Everything that affects the `result` should be performed within this
        // `with_anon_task` closure.
         let ((final_entry, result), dep_node) =
             tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
-                // We run our goal in a loop to handle coinductive cycles. If we fail to reach a
-                // fipoint we return overflow.
+                // When we encounter a coinductive cycle, we have to fetch the
+                // result of that cycle while we are still computing it. Because
+                // of this we continuously recompute the cycle until the result
+                // of the previous iteration is equal to the final result, at which
+                // point we are done.
                 for _ in 0..self.local_overflow_limit() {
-                    let result = loop_body(self, inspect);
-                    if let Ok(final_entry) = self.try_finalize_goal(input, result) {
-                        return (final_entry, result);
+                    let response = prove_goal(self, inspect);
+
+                    // Check whether the current goal is the root of a cycle and whether
+                    // we have to rerun because its provisional result differed from the
+                    // final result.
+                    //
+                    // Also update the response for this goal stored in the provisional
+                    // cache.
+                    let stack_entry = self.pop_stack();
+                    debug_assert_eq!(stack_entry.input, input);
+                    let cache = &mut self.provisional_cache;
+                    let provisional_entry_index =
+                        *cache.lookup_table.get(&stack_entry.input).unwrap();
+                    let provisional_entry = &mut cache.entries[provisional_entry_index];
+                    let prev_response = mem::replace(&mut provisional_entry.response, response);
+                    if stack_entry.has_been_used && prev_response != response {
+                        // If so, remove all entries whose result depends on this goal
+                        // from the provisional cache...
+                        //
+                        // That's not completely correct, as a nested goal can also
+                        // depend on a goal which is lower in the stack so it doesn't
+                        // actually depend on the current goal. This should be fairly
+                        // rare and is hopefully not relevant for performance.
+                        #[allow(rustc::potential_query_instability)]
+                        cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
+                        cache.entries.truncate(provisional_entry_index.index() + 1);
+
+                        // ...and finally push our goal back on the stack and reevaluate it.
+                        self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
+                    } else {
+                        return (stack_entry, response);
                     }
                 }
 
@@ -348,17 +314,16 @@ impl<'tcx> SearchGraph<'tcx> {
                 (current_entry, result)
             });
 
-        let cache = &mut self.provisional_cache;
-        let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
-        let provisional_entry = &mut cache.entries[provisional_entry_index];
-        let depth = provisional_entry.depth;
-
-        // We're now done with this goal. In case this goal is involved in a cycle
+        // We're now done with this goal. In case this goal is involved in a larger cycle
         // do not remove it from the provisional cache and do not add it to the global
         // cache.
         //
         // It is not possible for any nested goal to depend on something deeper on the
         // stack, as this would have also updated the depth of the current goal.
+        let cache = &mut self.provisional_cache;
+        let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
+        let provisional_entry = &mut cache.entries[provisional_entry_index];
+        let depth = provisional_entry.depth;
         if depth == self.stack.next_index() {
             for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
                 let actual_index = cache.lookup_table.remove(&entry.input);
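
For intuition, below is a minimal, self-contained sketch of the fixpoint loop that the inlined with_new_goal implements. Every name in it (Goal, Response, cycle_result, the two HashMaps) is a hypothetical stand-in rather than a rustc type, and the provisional-cache truncation, proof-tree inspection, and global-cache interaction from the diff are deliberately left out; it only models the core control flow: prove the goal with a caller-provided closure, compare the previous provisional result with the new one, and rerun until they agree or a local overflow limit is hit.

use std::collections::HashMap;

// Hypothetical stand-ins; the real `CanonicalInput` and `QueryResult`
// are far richer.
type Goal = &'static str;
type Response = u32;

struct SearchGraph {
    // Provisional results for goals currently "on the stack".
    provisional: HashMap<Goal, Response>,
    // Whether a goal's provisional result was observed by a nested cycle,
    // i.e. whether it is a cycle root (`has_been_used` in the diff).
    used_in_cycle: HashMap<Goal, bool>,
}

impl SearchGraph {
    // Mirrors the shape of `with_new_goal`: prove the goal via a closure
    // and rerun it until the provisional result stops changing.
    fn with_new_goal(
        &mut self,
        goal: Goal,
        mut prove_goal: impl FnMut(&mut Self) -> Response,
    ) -> Response {
        // Seed an optimistic provisional result, like the solver's
        // `response_no_constraints(.., Certainty::Yes)` for a new entry.
        self.provisional.insert(goal, 0);
        self.used_in_cycle.insert(goal, false);

        const LOCAL_OVERFLOW_LIMIT: usize = 8;
        for _ in 0..LOCAL_OVERFLOW_LIMIT {
            let response = prove_goal(self);
            // Eagerly store the new response, as the diff does with
            // `mem::replace(&mut provisional_entry.response, response)`.
            let prev_response = self.provisional.insert(goal, response).unwrap();
            // Only rerun if this goal was the root of a cycle *and* the
            // provisional result the cycle observed turned out to be wrong.
            if self.used_in_cycle[goal] && prev_response != response {
                self.used_in_cycle.insert(goal, false);
            } else {
                return response;
            }
        }
        u32::MAX // stand-in for the overflow response
    }

    // Called when a nested goal hits `goal` again: return the provisional
    // result and remember that it was observed, so the root knows to rerun.
    fn cycle_result(&mut self, goal: Goal) -> Response {
        self.used_in_cycle.insert(goal, true);
        self.provisional[goal]
    }
}

fn main() {
    let mut graph = SearchGraph {
        provisional: HashMap::new(),
        used_in_cycle: HashMap::new(),
    };
    // A self-referential goal whose result saturates at 3: each iteration
    // proves `min(observed + 1, 3)`, so a fixpoint is reached after four runs.
    let result = graph.with_new_goal("G", |graph| {
        let observed = graph.cycle_result("G");
        (observed + 1).min(3)
    });
    assert_eq!(result, 3);
    println!("fixpoint result: {result}");
}

Run against a goal that feeds its own provisional result back into the next iteration, the loop reruns until the response stops changing, mirroring how the diff reruns prove_goal whenever stack_entry.has_been_used && prev_response != response.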
