@@ -342,18 +342,6 @@ impl<K: DepKind> DepGraphData<K> {
         task: fn(Ctxt, A) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        // If the following assertion triggers, it can have two reasons:
-        // 1. Something is wrong with DepNode creation, either here or
-        //    in `DepGraph::try_mark_green()`.
-        // 2. Two distinct query keys get mapped to the same `DepNode`
-        //    (see for example #48923).
-        assert!(
-            !self.dep_node_exists(&key),
-            "forcing query with already existing `DepNode`\n\
-                 - query-key: {arg:?}\n\
-                 - dep-node: {key:?}"
-        );
-
         let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
         let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
             (with_deps(TaskDepsRef::EvalAlways), smallvec![])
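Note: the assertion removed here depended on `dep_node_exists`, which this change deletes entirely (see the `DepGraphData` and `DepGraph` hunks below), so it goes away along with that helper.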
@@ -448,12 +436,32 @@ impl<K: DepKind> DepGraphData<K> {
                     hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                 };

-                self.current.intern_new_node(
-                    cx.profiler(),
-                    target_dep_node,
-                    task_deps,
-                    Fingerprint::ZERO,
-                )
+                // The DepNodes generated by the process above are not unique. 2 queries could
+                // have exactly the same dependencies. However, deserialization does not handle
+                // duplicated nodes, so we do the deduplication here directly.
+                //
+                // As anonymous nodes are a small quantity compared to the full dep-graph, the
+                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
+                // us avoid useless growth of the graph with almost-equivalent nodes.
+                match self
+                    .current
+                    .anon_node_to_index
+                    .get_shard_by_value(&target_dep_node)
+                    .lock()
+                    .entry(target_dep_node)
+                {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        let dep_node_index = self.current.intern_new_node(
+                            cx.profiler(),
+                            target_dep_node,
+                            task_deps,
+                            Fingerprint::ZERO,
+                        );
+                        entry.insert(dep_node_index);
+                        dep_node_index
+                    }
+                }
             }
         };

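For readers unfamiliar with the pattern above: a minimal, self-contained sketch of the same shard-lock-then-`entry` deduplication, using only std types. `ShardedMap`, `NodeKey`, and `NodeIndex` are hypothetical stand-ins for `Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>` and the rustc index types, not the real API.

```rust
use std::collections::hash_map::{DefaultHasher, Entry};
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 4;

type NodeKey = u64; // stand-in for a `DepNode`
type NodeIndex = u32; // stand-in for `DepNodeIndex`

struct ShardedMap {
    shards: Vec<Mutex<HashMap<NodeKey, NodeIndex>>>,
}

impl ShardedMap {
    fn new() -> Self {
        Self { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    // Pick a shard by hashing the key, so unrelated keys rarely contend
    // on the same lock.
    fn shard_for(&self, key: &NodeKey) -> &Mutex<HashMap<NodeKey, NodeIndex>> {
        let mut h = DefaultHasher::new();
        key.hash(&mut h);
        &self.shards[(h.finish() as usize) % SHARDS]
    }

    // Return the existing index for `key`, or intern a new node exactly once.
    // The shard lock is held across the check-and-insert, mirroring the
    // `Entry` match in the diff above.
    fn get_or_intern(&self, key: NodeKey, intern: impl FnOnce() -> NodeIndex) -> NodeIndex {
        match self.shard_for(&key).lock().unwrap().entry(key) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let index = intern();
                e.insert(index);
                index
            }
        }
    }
}

fn main() {
    let map = ShardedMap::new();
    let mut next: NodeIndex = 0;
    let a = map.get_or_intern(42, || { let i = next; next += 1; i });
    let b = map.get_or_intern(42, || { let i = next; next += 1; i });
    assert_eq!(a, b); // the duplicate key reuses the interned index
}
```

Holding the shard lock across the check and the insert is what makes the deduplication race-free: two threads creating the same anonymous node serialize on one shard, and the loser reuses the winner's index.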
@@ -624,25 +632,6 @@ impl<K: DepKind> DepGraph<K> {
 }

 impl<K: DepKind> DepGraphData<K> {
-    #[inline]
-    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
-        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current
-                .new_node_to_index
-                .get_shard_by_value(dep_node)
-                .lock()
-                .get(dep_node)
-                .copied()
-        }
-    }
-
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
-    }
-
     fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.colors.get(prev_index)
@@ -675,11 +664,6 @@ impl<K: DepKind> DepGraphData<K> {
 }

 impl<K: DepKind> DepGraph<K> {
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
-        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
-    }
-
     /// Checks whether a previous work product exists for `v` and, if
     /// so, return the path that leads to it. Used to skip doing work.
     pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@@ -761,7 +745,7 @@ impl<K: DepKind> DepGraphData<K> {
         }
     }

-    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
+    #[instrument(skip(self, qcx, frame), level = "debug")]
     fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
@@ -860,10 +844,7 @@ impl<K: DepKind> DepGraphData<K> {
         let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

         #[cfg(not(parallel_compiler))]
-        {
-            debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
-        }
+        debug_assert!(self.colors.get(prev_dep_node_index).is_none());

         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -1065,24 +1046,24 @@ rustc_index::newtype_index! {
 /// largest in the compiler.
 ///
 /// For this reason, we avoid storing `DepNode`s more than once as map
-/// keys. The `new_node_to_index` map only contains nodes not in the previous
+/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
 /// graph, and we map nodes in the previous graph to indices via a two-step
 /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
 /// and the `prev_index_to_index` vector (which is more compact and faster than
 /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
 ///
-/// This struct uses three locks internally. The `data`, `new_node_to_index`,
+/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
 /// and `prev_index_to_index` fields are locked separately. Operations that take
 /// a `DepNodeIndex` typically just access the `data` field.
 ///
 /// We only need to manipulate at most two locks simultaneously:
-/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
-/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
+/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
+/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<K: DepKind> {
     encoder: Steal<GraphEncoder<K>>,
-    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+    anon_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,

     /// This is used to verify that fingerprints do not change between the creation of a node
     /// and its recomputation.
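The doc comment above captures the core design: nodes from the previous graph resolve through the serialized map plus a dense vector, so their `DepNode` keys are never stored a second time. A rough sketch of this two-step mapping, with hypothetical types (`String` standing in for `DepNode`, a plain `Vec` for `IndexVec`, locking omitted):

```rust
use std::collections::HashMap;

type SerializedDepNodeIndex = usize; // index into the previous (on-disk) graph
type DepNodeIndex = u32; // index into the current graph

struct PrevGraphMapping {
    // Step 1: owned by `SerializedDepGraph` in rustc; maps the full node key
    // to its index in the previous graph. The key is stored only here.
    node_to_prev_index: HashMap<String, SerializedDepNodeIndex>,
    // Step 2: a dense vector from previous index to current index; `None`
    // means the node has not been promoted into the current graph yet.
    prev_index_to_index: Vec<Option<DepNodeIndex>>,
}

impl PrevGraphMapping {
    fn current_index_of(&self, node: &str) -> Option<DepNodeIndex> {
        let prev = *self.node_to_prev_index.get(node)?;
        self.prev_index_to_index[prev]
    }
}

fn main() {
    let mapping = PrevGraphMapping {
        node_to_prev_index: HashMap::from([("typeck(foo)".to_owned(), 0)]),
        prev_index_to_index: vec![Some(7)],
    };
    assert_eq!(mapping.current_index_of("typeck(foo)"), Some(7));
}
```

A `Vec<Option<DepNodeIndex>>` indexed by the previous index is both smaller and faster than a second hash map, since the previous graph's node count is known up front.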
@@ -1162,7 +1143,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
                 record_graph,
                 record_stats,
             )),
-            new_node_to_index: Sharded::new(|| {
+            anon_node_to_index: Sharded::new(|| {
                 FxHashMap::with_capacity_and_hasher(
                     new_node_count_estimate / sharded::SHARDS,
                     Default::default(),
@@ -1199,16 +1180,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
-        {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index =
-                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };
+        let dep_node_index = self.encoder.borrow().send(profiler, key, current_fingerprint, edges);

         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
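With deduplication now handled at the anonymous-node creation site (see the earlier hunk), `intern_new_node` no longer needs a `DepNode`-keyed map at all: every node it receives is streamed straight to the encoder.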
@@ -1296,8 +1268,6 @@ impl<K: DepKind> CurrentDepGraph<K> {
         prev_graph: &SerializedDepGraph<K>,
         prev_index: SerializedDepNodeIndex,
     ) -> DepNodeIndex {
-        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
-
         let mut prev_index_to_index = self.prev_index_to_index.lock();

         match prev_index_to_index[prev_index] {
@@ -1318,19 +1288,6 @@ impl<K: DepKind> CurrentDepGraph<K> {
             }
         }
     }
-
-    #[inline]
-    fn debug_assert_not_in_new_nodes(
-        &self,
-        prev_graph: &SerializedDepGraph<K>,
-        prev_index: SerializedDepNodeIndex,
-    ) {
-        let node = &prev_graph.index_to_node(prev_index);
-        debug_assert!(
-            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
-            "node from previous graph present in new node collection"
-        );
-    }
 }

 /// The capacity of the `reads` field `SmallVec`