@@ -138,6 +138,21 @@ pub struct Parser<'a> {
    token_cursor: TokenCursor,
    // The number of calls to `bump`, i.e. the position in the token stream.
    num_bump_calls: usize,
+    // During parsing we may sometimes need to 'unglue' a glued token into two
+    // component tokens (e.g. '>>' into '>' and '>'), so the parser can consume
+    // them one at a time. This process bypasses the normal capturing mechanism
+    // (e.g. `num_bump_calls` will not be incremented), since the 'unglued'
+    // tokens do not exist in the original `TokenStream`.
+    //
+    // If we end up consuming both unglued tokens, this is not an issue. We'll
+    // end up capturing the single 'glued' token.
+    //
+    // However, sometimes we may want to capture just the first 'unglued'
+    // token. For example, capturing the `Vec<u8>` in `Option<Vec<u8>>`
+    // requires us to unglue the trailing `>>` token. The `break_last_token`
+    // field is used to track this token. It gets appended to the captured
+    // stream when we evaluate a `LazyAttrTokenStream`.
+    break_last_token: bool,
    /// This field is used to keep track of how many left angle brackets we have seen. This is
    /// required in order to detect extra leading left angle brackets (`<` characters) and error
    /// appropriately.
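
The comment added in the hunk above captures the core idea: `>>` is lexed as a single glued token, but parsing nested generics such as `Option<Vec<u8>>` has to consume it as two separate `>` tokens. A minimal standalone sketch of that ungluing step, assuming an invented `Tok` enum and `unglue` helper rather than rustc's real token types:

#[derive(Debug, Clone, PartialEq)]
enum Tok {
    Gt,   // a single `>`
    Shr,  // `>>`, lexed as one "glued" token
}

// Split a glued `>>` into its two component `>` tokens; any other token is
// passed through unchanged.
fn unglue(t: Tok) -> Vec<Tok> {
    match t {
        Tok::Shr => vec![Tok::Gt, Tok::Gt],
        other => vec![other],
    }
}

fn main() {
    // The trailing `>>` of `Option<Vec<u8>>` arrives as one glued token...
    let trailing = Tok::Shr;
    // ...but the parser needs two `>`s: one closes `Vec<u8>`, the other closes `Option<...>`.
    assert_eq!(unglue(trailing), vec![Tok::Gt, Tok::Gt]);
}
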
@@ -161,7 +176,7 @@ pub struct Parser<'a> {
// This type is used a lot, e.g. it's cloned when matching many declarative macro rules with nonterminals. Make sure
// it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Parser<'_>, 272);
+rustc_data_structures::static_assert_size!(Parser<'_>, 264);

/// Stores span information about a closure.
#[derive(Clone)]
@@ -223,29 +238,6 @@ struct TokenCursor {
    // tokens are in `stack[n-1]`. `stack[0]` (when present) has no delimiters
    // because it's the outermost token stream which never has delimiters.
    stack: Vec<(TokenTreeCursor, Delimiter, DelimSpan)>,
-
-    // During parsing, we may sometimes need to 'unglue' a
-    // glued token into two component tokens
-    // (e.g. '>>' into '>' and '>), so that the parser
-    // can consume them one at a time. This process
-    // bypasses the normal capturing mechanism
-    // (e.g. `num_next_calls` will not be incremented),
-    // since the 'unglued' tokens due not exist in
-    // the original `TokenStream`.
-    //
-    // If we end up consuming both unglued tokens,
-    // then this is not an issue - we'll end up
-    // capturing the single 'glued' token.
-    //
-    // However, in certain circumstances, we may
-    // want to capture just the first 'unglued' token.
-    // For example, capturing the `Vec<u8>`
-    // in `Option<Vec<u8>>` requires us to unglue
-    // the trailing `>>` token. The `break_last_token`
-    // field is used to track this token - it gets
-    // appended to the captured stream when
-    // we evaluate a `LazyAttrTokenStream`.
-    break_last_token: bool,
}

impl TokenCursor {
@@ -396,12 +388,9 @@ impl<'a> Parser<'a> {
            capture_cfg: false,
            restrictions: Restrictions::empty(),
            expected_tokens: Vec::new(),
-            token_cursor: TokenCursor {
-                tree_cursor: stream.into_trees(),
-                stack: Vec::new(),
-                break_last_token: false,
-            },
+            token_cursor: TokenCursor { tree_cursor: stream.into_trees(), stack: Vec::new() },
            num_bump_calls: 0,
+            break_last_token: false,
            unmatched_angle_bracket_count: 0,
            max_angle_bracket_count: 0,
            last_unexpected_token_span: None,
@@ -704,7 +693,7 @@ impl<'a> Parser<'a> {
                // If we consume any additional tokens, then this token
                // is not needed (we'll capture the entire 'glued' token),
                // and `bump` will set this field to `None`
-                self.token_cursor.break_last_token = true;
+                self.break_last_token = true;
                // Use the spacing of the glued token as the spacing
                // of the unglued second token.
                self.bump_with((Token::new(second, second_span), self.token_spacing));
@@ -1050,7 +1039,7 @@ impl<'a> Parser<'a> {
        // We've retrieved a token from the underlying
        // cursor, so we no longer need to worry about
        // an unglued token. See `break_and_eat` for more details
-        self.token_cursor.break_last_token = false;
+        self.break_last_token = false;
        if next.0.span.is_dummy() {
            // Tweak the location for better diagnostics, but keep syntactic context intact.
            let fallback_span = self.token.span;
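
Taken together, the last two hunks give the moved flag a simple lifecycle: breaking `>>` in `break_and_eat` sets `break_last_token`, and the very next `bump` clears it, since pulling another real token means the whole glued token gets captured anyway. A rough sketch of that lifecycle, using an invented `SketchParser` with `String` tokens rather than rustc's actual `Parser` and token types:

struct SketchParser {
    break_last_token: bool,
    captured: Vec<String>,
}

impl SketchParser {
    // Mirrors `break_and_eat` splitting `>>`: only the first `>` is consumed
    // now, so remember that the last captured token was broken in half.
    fn break_and_eat_gt(&mut self) {
        self.captured.push(">".to_string());
        self.break_last_token = true;
    }

    // Mirrors `bump`: retrieving a real token from the stream means any
    // previously broken token was fully consumed, so the flag is cleared.
    fn bump(&mut self, tok: &str) {
        self.break_last_token = false;
        self.captured.push(tok.to_string());
    }

    // Mirrors evaluating a `LazyAttrTokenStream`: if capturing stops while the
    // flag is still set, the final captured token was only half of a glued
    // `>>`, and the caller has to account for that.
    fn finish_capture(self) -> (Vec<String>, bool) {
        (self.captured, self.break_last_token)
    }
}

fn main() {
    let mut p = SketchParser { break_last_token: false, captured: Vec::new() };
    for tok in ["Vec", "<", "u8"] {
        p.bump(tok);
    }
    p.break_and_eat_gt(); // the first half of the trailing `>>` closes `Vec<u8>`
    let (tokens, last_was_broken) = p.finish_capture();
    println!("captured {tokens:?}; last token was broken: {last_was_broken}");
}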