rustc_expand/mbe/macro_parser.rs
//! This is an NFA-based parser, which calls out to the main Rust parser for named non-terminals
//! (which it commits to fully when it hits one in a grammar). There's a set of current NFA threads
//! and a set of next ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional use of NFA or Earley parsing, but it's an easier
//! fit for Macro-by-Example-style rules.
//!
//! (In order to prevent the pathological case, we'd need to lazily construct the resulting
//! `NamedMatch`es at the very end. It'd be a pain, and require more memory to keep around old
//! matcher positions, but it would also save overhead)
//!
//! We don't say this parser uses the Earley algorithm, because it's unnecessarily inaccurate.
//! The macro parser restricts itself to the features of finite state automata. Earley parsers
//! can be described as an extension of NFAs with completion rules, prediction rules, and recursion.
//!
//! Quick intro to how the parser works:
//!
//! A "matcher position" (a.k.a. "position" or "mp") is a dot in the middle of a matcher, usually
//! written as a `·`. For example `· a $( a )* a b` is one, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a token at a time, maintaining a list
//! of threads consistent with the current position in the input string: `cur_mps`.
//!
//! As it processes them, it fills up `eof_mps` with threads that would be valid if
//! the macro invocation is now over, `bb_mps` with threads that are waiting on
//! a Rust non-terminal like `$e:expr`, and `next_mps` with threads that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. The rules for moving the · without
//! consuming any input are called epsilon transitions. It only advances or calls
//! out to the real Rust parser when no `cur_mps` threads remain.
//!
//! Example:
//!
//! ```text, ignore
//! Start parsing a a a a b against [· a $( a )* a b].
//!
//! Remaining input: a a a a b
//! next: [· a $( a )* a b]
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a a b
//! cur: [a · $( a )* a b]
//! Descend/Skip (first position).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an a. - - -
//!
//! Remaining input: a a b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first position)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: a b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first position)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an a. - - - (this looks exactly like the last step)
//!
//! Remaining input: b
//! cur: [a $( a · )* a b] [a $( a )* a · b]
//! Follow epsilon transition: Finish/Repeat (first position)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over a b. - - -
//!
//! Remaining input: ''
//! eof: [a $( a )* a b ·]
//! ```

use std::borrow::Cow;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::Display;
use std::rc::Rc;

pub(crate) use NamedMatch::*;
pub(crate) use ParseResult::*;
use rustc_ast::token::{self, DocComment, NonterminalKind, Token, TokenKind};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::ErrorGuaranteed;
use rustc_lint_defs::pluralize;
use rustc_parse::parser::{ParseNtResult, Parser, token_descr};
use rustc_span::{Ident, MacroRulesNormalizedIdent, Span};

use crate::mbe::macro_rules::Tracker;
use crate::mbe::{KleeneOp, TokenTree};

/// A unit within a matcher that a `MatcherPos` can refer to. Similar to (and derived from)
/// `mbe::TokenTree`, but designed specifically for fast and easy traversal during matching.
/// Notable differences to `mbe::TokenTree`:
/// - It is non-recursive, i.e. there is no nesting.
/// - The end pieces of each sequence (the separator, if present, and the Kleene op) are
///   represented explicitly, as is the very end of the matcher.
///
/// This means a matcher can be represented by `&[MatcherLoc]`, and traversal mostly involves
/// simply incrementing the current matcher position index by one.
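///
/// For example (a sketch; the variant fields are elided), the matcher `a $( b ),* c` flattens
/// to roughly:
///
/// ```text
/// [Token(a), Sequence, Token(b), SequenceSep(,), SequenceKleeneOpAfterSep, Token(c), Eof]
/// ```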
#[derive(Debug, PartialEq, Clone)]
pub(crate) enum MatcherLoc {
    Token {
        token: Token,
    },
    Delimited,
    Sequence {
        op: KleeneOp,
        num_metavar_decls: usize,
        idx_first_after: usize,
        next_metavar: usize,
        seq_depth: usize,
    },
    SequenceKleeneOpNoSep {
        op: KleeneOp,
        idx_first: usize,
    },
    SequenceSep {
        separator: Token,
    },
    SequenceKleeneOpAfterSep {
        idx_first: usize,
    },
    MetaVarDecl {
        span: Span,
        bind: Ident,
        kind: NonterminalKind,
        next_metavar: usize,
        seq_depth: usize,
    },
    Eof,
}

impl MatcherLoc {
    pub(super) fn span(&self) -> Option<Span> {
        match self {
            MatcherLoc::Token { token } => Some(token.span),
            MatcherLoc::Delimited => None,
            MatcherLoc::Sequence { .. } => None,
            MatcherLoc::SequenceKleeneOpNoSep { .. } => None,
            MatcherLoc::SequenceSep { .. } => None,
            MatcherLoc::SequenceKleeneOpAfterSep { .. } => None,
            MatcherLoc::MetaVarDecl { span, .. } => Some(*span),
            MatcherLoc::Eof => None,
        }
    }
}

impl Display for MatcherLoc {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MatcherLoc::Token { token } | MatcherLoc::SequenceSep { separator: token } => {
                write!(f, "{}", token_descr(token))
            }
            MatcherLoc::MetaVarDecl { bind, kind, .. } => {
                write!(f, "meta-variable `${bind}:{kind}`")
            }
            MatcherLoc::Eof => f.write_str("end of macro"),

            // These are not printed in the diagnostic
            MatcherLoc::Delimited => f.write_str("delimiter"),
            MatcherLoc::Sequence { .. } => f.write_str("sequence start"),
            MatcherLoc::SequenceKleeneOpNoSep { .. } => f.write_str("sequence end"),
            MatcherLoc::SequenceKleeneOpAfterSep { .. } => f.write_str("sequence end"),
        }
    }
}

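/// Flattens a recursive `&[TokenTree]` matcher into the linear `Vec<MatcherLoc>` form used for
/// matching (see the example on `MatcherLoc` above). A final `MatcherLoc::Eof` entry is always
/// appended.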
pub(super) fn compute_locs(matcher: &[TokenTree]) -> Vec<MatcherLoc> {
    fn inner(
        tts: &[TokenTree],
        locs: &mut Vec<MatcherLoc>,
        next_metavar: &mut usize,
        seq_depth: usize,
    ) {
        for tt in tts {
            match tt {
                TokenTree::Token(token) => {
                    locs.push(MatcherLoc::Token { token: *token });
                }
                TokenTree::Delimited(span, _, delimited) => {
                    let open_token = Token::new(delimited.delim.as_open_token_kind(), span.open);
                    let close_token = Token::new(delimited.delim.as_close_token_kind(), span.close);

                    locs.push(MatcherLoc::Delimited);
                    locs.push(MatcherLoc::Token { token: open_token });
                    inner(&delimited.tts, locs, next_metavar, seq_depth);
                    locs.push(MatcherLoc::Token { token: close_token });
                }
                TokenTree::Sequence(_, seq) => {
                    // We can't determine `idx_first_after` and construct the final
                    // `MatcherLoc::Sequence` until after `inner()` is called and the sequence end
                    // pieces are processed. So we push a dummy value (`Eof` is cheapest to
                    // construct) now, and overwrite it with the proper value below.
                    let dummy = MatcherLoc::Eof;
                    locs.push(dummy);

                    let next_metavar_orig = *next_metavar;
                    let op = seq.kleene.op;
                    let idx_first = locs.len();
                    let idx_seq = idx_first - 1;
                    inner(&seq.tts, locs, next_metavar, seq_depth + 1);

                    if let Some(separator) = &seq.separator {
                        locs.push(MatcherLoc::SequenceSep { separator: separator.clone() });
                        locs.push(MatcherLoc::SequenceKleeneOpAfterSep { idx_first });
                    } else {
                        locs.push(MatcherLoc::SequenceKleeneOpNoSep { op, idx_first });
                    }

                    // Overwrite the dummy value pushed above with the proper value.
                    locs[idx_seq] = MatcherLoc::Sequence {
                        op,
                        num_metavar_decls: seq.num_captures,
                        idx_first_after: locs.len(),
                        next_metavar: next_metavar_orig,
                        seq_depth,
                    };
                }
                &TokenTree::MetaVarDecl { span, name: bind, kind } => {
                    locs.push(MatcherLoc::MetaVarDecl {
                        span,
                        bind,
                        kind,
                        next_metavar: *next_metavar,
                        seq_depth,
                    });
                    *next_metavar += 1;
                }
                TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
            }
        }
    }

    let mut locs = vec![];
    let mut next_metavar = 0;
    inner(matcher, &mut locs, &mut next_metavar, /* seq_depth */ 0);

    // A final entry is needed for eof.
    locs.push(MatcherLoc::Eof);

    locs
}

/// A single matcher position, representing the state of matching.
#[derive(Debug)]
struct MatcherPos {
    /// The index into the matcher's `MatcherLoc`s, which represents the "dot".
    idx: usize,

    /// The matches made against metavar decls so far. On a successful match, this vector ends up
    /// with one element per metavar decl in the matcher. Each element records token trees matched
    /// against the relevant metavar by the black-box parser. An element will be a `MatchedSeq` if
    /// the corresponding metavar decl is within a sequence.
    ///
    /// It is critical to performance that this is an `Rc`, because it gets cloned frequently when
    /// processing sequences. Mostly for sequence-ending possibilities that must be tried but end
    /// up failing.
    matches: Rc<Vec<NamedMatch>>,
}

// This type is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(target_pointer_width = "64")]
rustc_data_structures::static_assert_size!(MatcherPos, 16);

impl MatcherPos {
    /// Adds `m` as a named match for the `metavar_idx`-th metavar. There are only two call sites,
    /// and both are hot enough to be always worth inlining.
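    ///
    /// For example (a sketch): with `seq_depth == 2`, `matches[metavar_idx]` is expected to be a
    /// `MatchedSeq` whose last element is itself a `MatchedSeq`, and `m` is pushed onto that
    /// innermost vector.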
    #[inline(always)]
    fn push_match(&mut self, metavar_idx: usize, seq_depth: usize, m: NamedMatch) {
        let matches = Rc::make_mut(&mut self.matches);
        match seq_depth {
            0 => {
                // We are not within a sequence. Just append `m`.
                assert_eq!(metavar_idx, matches.len());
                matches.push(m);
            }
            _ => {
                // We are within a sequence. Find the final `MatchedSeq` at the appropriate depth
                // and append `m` to its vector.
                let mut curr = &mut matches[metavar_idx];
                for _ in 0..seq_depth - 1 {
                    match curr {
                        MatchedSeq(seq) => curr = seq.last_mut().unwrap(),
                        _ => unreachable!(),
                    }
                }
                match curr {
                    MatchedSeq(seq) => seq.push(m),
                    _ => unreachable!(),
                }
            }
        }
    }
}

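/// The matcher positions that have reached the end of the matcher when the input is exhausted:
/// none (the match fails), exactly one (the match succeeds), or multiple (the parse is
/// ambiguous).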
enum EofMatcherPositions {
    None,
    One(MatcherPos),
    Multiple,
}

/// Represents the possible results of an attempted parse.
#[derive(Debug)]
pub(crate) enum ParseResult<T, F> {
    /// Parsed successfully.
    Success(T),
    /// Arm failed to match. The failure data is built by the tracker from the offending token
    /// (a `token::Eof` token indicates an unexpected end of macro invocation; otherwise no rules
    /// expected the given token), the token's approximate position in the input token stream,
    /// and a message.
    Failure(F),
    /// Fatal error (malformed macro?). Abort compilation.
    Error(rustc_span::Span, String),
    ErrorReported(ErrorGuaranteed),
}

/// A `ParseResult` where the `Success` variant contains a mapping of
/// `MacroRulesNormalizedIdent`s to `NamedMatch`es. This represents the mapping
/// of metavars to the token trees they bind to.
pub(crate) type NamedParseResult<F> = ParseResult<NamedMatches, F>;

/// Contains a mapping of `MacroRulesNormalizedIdent`s to `NamedMatch`es.
/// This represents the mapping of metavars to the token trees they bind to.
pub(crate) type NamedMatches = FxHashMap<MacroRulesNormalizedIdent, NamedMatch>;

/// Count how many metavar declarations are in `matcher`.
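/// For example, `$a:ident ( $( $b:expr ),* )` contains two metavar declarations (`$a` and `$b`).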
pub(super) fn count_metavar_decls(matcher: &[TokenTree]) -> usize {
    matcher
        .iter()
        .map(|tt| match tt {
            TokenTree::MetaVarDecl { .. } => 1,
            TokenTree::Sequence(_, seq) => seq.num_captures,
            TokenTree::Delimited(.., delim) => count_metavar_decls(&delim.tts),
            TokenTree::Token(..) => 0,
            TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
        })
        .sum()
}

/// `NamedMatch` is a pattern-match result for a single metavar. All
/// `MatchedSingle`s in the `NamedMatch` have the same non-terminal type
/// (expr, item, etc).
///
/// The in-memory structure of a particular `NamedMatch` represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
/// the `MatchedSingle`s, will depend on the token tree it was applied
/// to: each `MatchedSeq` corresponds to a single repetition in the originating
/// token tree. The depth of the `NamedMatch` structure will therefore depend
/// only on the nesting depth of repetitions in the originating token tree it
/// was derived from.
///
/// In layperson's terms: `NamedMatch` will form a tree representing nested matches of a particular
/// meta variable. For example, if we are matching the following macro against the following
/// invocation...
///
/// ```rust
/// macro_rules! foo {
///     ($($($x:ident),+);+) => {}
/// }
///
/// foo!(a, b, c, d; a, b, c, d, e);
/// ```
///
/// Then, the tree will have the following shape:
///
/// ```ignore (private-internal)
/// # use NamedMatch::*;
/// MatchedSeq([
///     MatchedSeq([
///         MatchedSingle(a),
///         MatchedSingle(b),
///         MatchedSingle(c),
///         MatchedSingle(d),
///     ]),
///     MatchedSeq([
///         MatchedSingle(a),
///         MatchedSingle(b),
///         MatchedSingle(c),
///         MatchedSingle(d),
///         MatchedSingle(e),
///     ])
/// ])
/// ```
#[derive(Debug, Clone)]
pub(crate) enum NamedMatch {
    MatchedSeq(Vec<NamedMatch>),
    MatchedSingle(ParseNtResult),
}

/// Performs a token equality check, ignoring syntax context (that is, an unhygienic comparison)
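/// For example, an identifier written in a macro definition and an identifier written at the
/// call site compare equal here if they have the same name and rawness, even though their
/// syntax contexts differ.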
fn token_name_eq(t1: &Token, t2: &Token) -> bool {
    if let (Some((ident1, is_raw1)), Some((ident2, is_raw2))) = (t1.ident(), t2.ident()) {
        ident1.name == ident2.name && is_raw1 == is_raw2
    } else if let (Some((ident1, is_raw1)), Some((ident2, is_raw2))) =
        (t1.lifetime(), t2.lifetime())
    {
        ident1.name == ident2.name && is_raw1 == is_raw2
    } else {
        // Note: we SHOULD NOT use `t1.kind == t2.kind` here, and we should instead compare the
        // tokens using the special comparison logic below.
        // It makes sure that variants containing `InvisibleOrigin` will
        // never compare equal to one another.
        //
        // When we had AST-based nonterminals we couldn't compare them, and the
        // old `Nonterminal` type had an `eq` that always returned false,
        // resulting in this restriction:
        // <https://doc.rust-lang.org/nightly/reference/macros-by-example.html#forwarding-a-matched-fragment>
        // This comparison logic emulates that behaviour. We could consider lifting this
        // restriction now but there are still cases involving invisible
        // delimiters that make it harder than it first appears.
        match (t1.kind, t2.kind) {
            (TokenKind::OpenInvisible(_) | TokenKind::CloseInvisible(_), _)
            | (_, TokenKind::OpenInvisible(_) | TokenKind::CloseInvisible(_)) => false,
            (a, b) => a == b,
        }
    }
}

// Note: the vectors could be created and dropped within `parse_tt`, but to avoid excess
// allocations we have a single vector for each kind that is cleared and reused repeatedly.
pub(crate) struct TtParser {
    macro_name: Ident,

    /// The set of current mps to be processed. This should be empty by the end of a successful
    /// execution of `parse_tt_inner`.
    cur_mps: Vec<MatcherPos>,

    /// The set of newly generated mps. These are used to replenish `cur_mps` in the function
    /// `parse_tt`.
    next_mps: Vec<MatcherPos>,

    /// The set of mps that are waiting for the black-box parser.
    bb_mps: Vec<MatcherPos>,

    /// Pre-allocate an empty match array, so it can be cloned cheaply for macros with many rules
    /// that have no metavars.
    empty_matches: Rc<Vec<NamedMatch>>,
}

impl TtParser {
    pub(super) fn new(macro_name: Ident) -> TtParser {
        TtParser {
            macro_name,
            cur_mps: vec![],
            next_mps: vec![],
            bb_mps: vec![],
            empty_matches: Rc::new(vec![]),
        }
    }

    pub(super) fn has_no_remaining_items_for_step(&self) -> bool {
        self.cur_mps.is_empty()
    }

    /// Process the matcher positions of `cur_mps` until it is empty. In the process, this will
    /// produce more mps in `next_mps` and `bb_mps`.
    ///
    /// # Returns
    ///
    /// `Some(result)` if everything is finished, `None` otherwise. Note that matches are kept
    /// track of through the mps generated.
    fn parse_tt_inner<'matcher, T: Tracker<'matcher>>(
        &mut self,
        matcher: &'matcher [MatcherLoc],
        token: &Token,
        approx_position: u32,
        track: &mut T,
    ) -> Option<NamedParseResult<T::Failure>> {
        // Matcher positions that would be valid if the macro invocation was over now. Only
        // modified if `token == Eof`.
        let mut eof_mps = EofMatcherPositions::None;

        while let Some(mut mp) = self.cur_mps.pop() {
            let matcher_loc = &matcher[mp.idx];
            track.before_match_loc(self, matcher_loc);

            match matcher_loc {
                MatcherLoc::Token { token: t } => {
                    // If it's a doc comment, we just ignore it and move on to the next tt in the
                    // matcher. This is a bug, but #95267 showed that existing programs rely on
                    // this behaviour, and changing it would require some care and a transition
                    // period.
                    //
                    // If the token matches, we can just advance the parser.
                    //
                    // Otherwise, this match has failed, there is nothing to do, and hopefully
                    // another mp in `cur_mps` will match.
                    if matches!(t, Token { kind: DocComment(..), .. }) {
                        mp.idx += 1;
                        self.cur_mps.push(mp);
                    } else if token_name_eq(t, token) {
                        mp.idx += 1;
                        self.next_mps.push(mp);
                    }
                }
                MatcherLoc::Delimited => {
                    // Entering the delimiter is trivial.
                    mp.idx += 1;
                    self.cur_mps.push(mp);
                }
                &MatcherLoc::Sequence {
                    op,
                    num_metavar_decls,
                    idx_first_after,
                    next_metavar,
                    seq_depth,
                } => {
                    // Install an empty vec for each metavar within the sequence.
                    for metavar_idx in next_metavar..next_metavar + num_metavar_decls {
                        mp.push_match(metavar_idx, seq_depth, MatchedSeq(vec![]));
                    }

                    if matches!(op, KleeneOp::ZeroOrMore | KleeneOp::ZeroOrOne) {
                        // Try zero matches of this sequence, by skipping over it.
                        self.cur_mps.push(MatcherPos {
                            idx: idx_first_after,
                            matches: Rc::clone(&mp.matches),
                        });
                    }

                    // Try one or more matches of this sequence, by entering it.
                    mp.idx += 1;
                    self.cur_mps.push(mp);
                }
                &MatcherLoc::SequenceKleeneOpNoSep { op, idx_first } => {
                    // We are past the end of a sequence with no separator. Try ending the
                    // sequence. If that's not possible, `ending_mp` will fail quietly when it is
                    // processed next time around the loop.
                    let ending_mp = MatcherPos {
                        idx: mp.idx + 1, // +1 skips the Kleene op
                        matches: Rc::clone(&mp.matches),
                    };
                    self.cur_mps.push(ending_mp);

                    if op != KleeneOp::ZeroOrOne {
                        // Try another repetition.
                        mp.idx = idx_first;
                        self.cur_mps.push(mp);
                    }
                }
                MatcherLoc::SequenceSep { separator } => {
                    // We are past the end of a sequence with a separator but we haven't seen the
                    // separator yet. Try ending the sequence. If that's not possible, `ending_mp`
                    // will fail quietly when it is processed next time around the loop.
                    let ending_mp = MatcherPos {
                        idx: mp.idx + 2, // +2 skips the separator and the Kleene op
                        matches: Rc::clone(&mp.matches),
                    };
                    self.cur_mps.push(ending_mp);

                    if token_name_eq(token, separator) {
                        // The separator matches the current token. Advance past it.
                        mp.idx += 1;
                        self.next_mps.push(mp);
                    }
                }
                &MatcherLoc::SequenceKleeneOpAfterSep { idx_first } => {
                    // We are past the sequence separator. This can't be a `?` Kleene op, because
                    // they don't permit separators. Try another repetition.
                    mp.idx = idx_first;
                    self.cur_mps.push(mp);
                }
                &MatcherLoc::MetaVarDecl { kind, .. } => {
                    // Built-in nonterminals never start with these tokens, so we can eliminate
                    // them from consideration. We use the span of the metavariable declaration
                    // to determine any edition-specific matching behavior for non-terminals.
                    if Parser::nonterminal_may_begin_with(kind, token) {
                        self.bb_mps.push(mp);
                    }
                }
                MatcherLoc::Eof => {
                    // We are past the matcher's end, and not in a sequence. Try to end things.
                    debug_assert_eq!(mp.idx, matcher.len() - 1);
                    if *token == token::Eof {
                        eof_mps = match eof_mps {
                            EofMatcherPositions::None => EofMatcherPositions::One(mp),
                            EofMatcherPositions::One(_) | EofMatcherPositions::Multiple => {
                                EofMatcherPositions::Multiple
                            }
                        }
                    }
                }
            }
        }

        // If we reached the end of input, check that there is EXACTLY ONE possible matcher.
        // Otherwise, either the parse is ambiguous (which is an error) or there is a syntax error.
        if *token == token::Eof {
            Some(match eof_mps {
                EofMatcherPositions::One(mut eof_mp) => {
                    // Need to take ownership of the matches from within the `Rc`.
                    Rc::make_mut(&mut eof_mp.matches);
                    let matches = Rc::try_unwrap(eof_mp.matches).unwrap().into_iter();
                    self.nameize(matcher, matches)
                }
                EofMatcherPositions::Multiple => {
                    Error(token.span, "ambiguity: multiple successful parses".to_string())
                }
                EofMatcherPositions::None => Failure(T::build_failure(
                    Token::new(
                        token::Eof,
                        if token.span.is_dummy() { token.span } else { token.span.shrink_to_hi() },
                    ),
                    approx_position,
                    "missing tokens in macro arguments",
                )),
            })
        } else {
            None
        }
    }

    /// Match the token stream from `parser` against `matcher`.
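    ///
    /// Each iteration of the loop below runs `parse_tt_inner` over `cur_mps` and then does
    /// exactly one of the following: returns a finished result, consumes the next input token
    /// (replenishing `cur_mps` from `next_mps`), hands the single waiting metavar position to
    /// the black-box parser, or reports an ambiguity error if several possibilities remain.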
    pub(super) fn parse_tt<'matcher, T: Tracker<'matcher>>(
        &mut self,
        parser: &mut Cow<'_, Parser<'_>>,
        matcher: &'matcher [MatcherLoc],
        track: &mut T,
    ) -> NamedParseResult<T::Failure> {
        // A queue of possible matcher positions. We initialize it with the matcher position in
        // which the "dot" is before the first token of the first token tree in `matcher`.
        // `parse_tt_inner` then processes all of these possible matcher positions and produces
        // possible next positions into `next_mps`. After some post-processing, the contents of
        // `next_mps` replenish `cur_mps` and we start over again.
        self.cur_mps.clear();
        self.cur_mps.push(MatcherPos { idx: 0, matches: Rc::clone(&self.empty_matches) });

        loop {
            self.next_mps.clear();
            self.bb_mps.clear();

            // Process `cur_mps` until either we have finished the input or we need to get some
            // parsing from the black-box parser done.
            let res = self.parse_tt_inner(
                matcher,
                &parser.token,
                parser.approx_token_stream_pos(),
                track,
            );

            if let Some(res) = res {
                return res;
            }

            // `parse_tt_inner` handled all of `cur_mps`, so it's empty.
            assert!(self.cur_mps.is_empty());

            // Error messages here could be improved with links to original rules.
            match (self.next_mps.len(), self.bb_mps.len()) {
                (0, 0) => {
                    // There are no possible next positions AND we aren't waiting for the black-box
                    // parser: syntax error.
                    return Failure(T::build_failure(
                        parser.token,
                        parser.approx_token_stream_pos(),
                        "no rules expected this token in macro call",
                    ));
                }

                (_, 0) => {
                    // Dump all possible `next_mps` into `cur_mps` for the next iteration. Then
                    // process the next token.
                    self.cur_mps.append(&mut self.next_mps);
                    parser.to_mut().bump();
                }

                (0, 1) => {
                    // We need to call the black-box parser to get some nonterminal.
                    let mut mp = self.bb_mps.pop().unwrap();
                    let loc = &matcher[mp.idx];
                    if let &MatcherLoc::MetaVarDecl {
                        span, kind, next_metavar, seq_depth, ..
                    } = loc
                    {
                        // We use the span of the metavariable declaration to determine any
                        // edition-specific matching behavior for non-terminals.
                        let nt = match parser.to_mut().parse_nonterminal(kind) {
                            Err(err) => {
                                let guarantee = err.with_span_label(
                                    span,
                                    format!(
                                        "while parsing argument for this `{kind}` macro fragment"
                                    ),
                                )
                                .emit();
                                return ErrorReported(guarantee);
                            }
                            Ok(nt) => nt,
                        };
                        mp.push_match(next_metavar, seq_depth, MatchedSingle(nt));
                        mp.idx += 1;
                    } else {
                        unreachable!()
                    }
                    self.cur_mps.push(mp);
                }

                (_, _) => {
                    // Too many possibilities!
                    return self.ambiguity_error(matcher, parser.token.span);
                }
            }

            assert!(!self.cur_mps.is_empty());
        }
    }

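    /// Builds the "local ambiguity" error: at the current input position there is more than one
    /// way to proceed, i.e. several metavar (built-in NT) positions are waiting on the black-box
    /// parser, or such a position competes with ordinary token positions. For example, a matcher
    /// like `$( $i:ident )* $j:ident` reaches this state on its first input identifier, which
    /// could either extend the repetition or match `$j`.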
    fn ambiguity_error<F>(
        &self,
        matcher: &[MatcherLoc],
        token_span: rustc_span::Span,
    ) -> NamedParseResult<F> {
        let nts = self
            .bb_mps
            .iter()
            .map(|mp| match &matcher[mp.idx] {
                MatcherLoc::MetaVarDecl { bind, kind, .. } => {
                    format!("{kind} ('{bind}')")
                }
                _ => unreachable!(),
            })
            .collect::<Vec<String>>()
            .join(" or ");

        Error(
            token_span,
            format!(
                "local ambiguity when calling macro `{}`: multiple parsing options: {}",
                self.macro_name,
                match self.next_mps.len() {
                    0 => format!("built-in NTs {nts}."),
                    n => format!("built-in NTs {nts} or {n} other option{s}.", s = pluralize!(n)),
                }
            ),
        )
    }

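    /// Collects the matches into a map keyed by the normalized names of the metavars declared in
    /// `matcher`. A matcher that binds the same name twice (e.g. `($x:expr, $x:ident)`) hits the
    /// "duplicated bind name" error below.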
    fn nameize<I: Iterator<Item = NamedMatch>, F>(
        &self,
        matcher: &[MatcherLoc],
        mut res: I,
    ) -> NamedParseResult<F> {
        // Make sure that each metavar has _exactly one_ binding. If so, insert the binding into
        // the `NamedParseResult`. Otherwise, it's an error.
        let mut ret_val = FxHashMap::default();
        for loc in matcher {
            if let &MatcherLoc::MetaVarDecl { span, bind, .. } = loc {
                match ret_val.entry(MacroRulesNormalizedIdent::new(bind)) {
                    Vacant(spot) => spot.insert(res.next().unwrap()),
                    Occupied(..) => {
                        return Error(span, format!("duplicated bind name: {bind}"));
                    }
                };
            }
        }
        Success(ret_val)
    }
}