Skip to content

Add benchmark tests for mbe #7566

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Feb 27, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

560 changes: 560 additions & 0 deletions bench_data/numerous_macro_rules

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions crates/mbe/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,6 @@ parser = { path = "../parser", version = "0.0.0" }
tt = { path = "../tt", version = "0.0.0" }
test_utils = { path = "../test_utils", version = "0.0.0" }

[dev-dependencies]
profile = { path = "../profile", version = "0.0.0" }

211 changes: 211 additions & 0 deletions crates/mbe/src/benchmark.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,211 @@
//! This module adds real-world `macro_rules!` examples for benchmark tests

use rustc_hash::FxHashMap;
use syntax::{
ast::{self, NameOwner},
AstNode, SmolStr,
};
use test_utils::{bench, bench_fixture, skip_slow_tests};

use crate::{
ast_to_token_tree,
parser::{Op, RepeatKind, Separator},
MacroRules,
};

#[test]
fn benchmark_parse_macro_rules() {
    // Benchmarks are opt-in; bail out unless slow tests are enabled.
    if skip_slow_tests() {
        return;
    }
    let fixtures = macro_rules_fixtures_tt();
    // Time only the parsing work: the bench guard is dropped before the assert.
    let total_rules: usize = {
        let _timer = bench("mbe parse macro rules");
        fixtures.values().map(|tt| MacroRules::parse(tt).unwrap().rules.len()).sum()
    };
    // Sanity-check that the fixture set (and thus the measured work) is unchanged.
    assert_eq!(total_rules, 1144);
}

#[test]
fn benchmark_expand_macro_rules() {
    // Benchmarks are opt-in; bail out unless slow tests are enabled.
    if skip_slow_tests() {
        return;
    }
    let rules = macro_rules_fixtures();
    let invocations = invocation_fixtures(&rules);

    // Time only the expansion loop; the guard drops before the final assert.
    let token_count: usize = {
        let _timer = bench("mbe expand macro rules");
        let mut total = 0;
        for (id, tt) in invocations {
            let res = rules[&id].expand(&tt);
            if res.err.is_some() {
                // FIXME:
                // Currently `invocation_fixtures` will generate some correct invocations but
                // cannot be expanded by mbe. We ignore errors here.
                // See: https://github.com/rust-analyzer/rust-analyzer/issues/4777
                eprintln!("err from {} {:?}", id, res.err);
            }
            total += res.value.token_trees.len();
        }
        total
    };
    // Sanity-check that the generated corpus (and thus the measured work) is unchanged.
    assert_eq!(token_count, 66995);
}

fn macro_rules_fixtures() -> FxHashMap<String, MacroRules> {
macro_rules_fixtures_tt()
.into_iter()
.map(|(id, tt)| (id, MacroRules::parse(&tt).unwrap()))
.collect()
}

fn macro_rules_fixtures_tt() -> FxHashMap<String, tt::Subtree> {
let fixture = bench_fixture::numerous_macro_rules();
let source_file = ast::SourceFile::parse(&fixture).ok().unwrap();

source_file
.syntax()
.descendants()
.filter_map(ast::MacroRules::cast)
.map(|rule| {
let id = rule.name().unwrap().to_string();
let (def_tt, _) = ast_to_token_tree(&rule.token_tree().unwrap()).unwrap();
(id, def_tt)
})
.collect()
}

// Generate random invocation fixtures from rules
//
// For every rule of every macro, synthesizes two pseudo-random invocation
// token trees by walking the rule's left-hand-side matcher and emitting a
// representative token for each op. Repetition counts are drawn from a
// deterministic LCG, so the generated corpus — and the benchmark's expected
// totals — is stable across runs. Do not reorder any `rand` call: the output
// depends on the exact seed sequence.
fn invocation_fixtures(rules: &FxHashMap<String, MacroRules>) -> Vec<(String, tt::Subtree)> {
    let mut seed = 123456789;
    let mut res = Vec::new();

    for (name, it) in rules {
        for rule in &it.rules {
            // Generate twice
            for _ in 0..2 {
                let mut subtree = tt::Subtree::default();
                for op in rule.lhs.iter() {
                    collect_from_op(op, &mut subtree, &mut seed);
                }
                res.push((name.clone(), subtree));
            }
        }
    }
    // Explicit `return` because nested item definitions follow below.
    return res;

    // Appends token(s) matching `op` to `parent`. `seed` only advances when a
    // repetition count is drawn (the `Op::Repeat` arm).
    fn collect_from_op(op: &Op, parent: &mut tt::Subtree, seed: &mut usize) {
        return match op {
            // For a metavariable, emit a fixed representative token of the
            // requested fragment kind.
            Op::Var { kind, .. } => match kind.as_ref().map(|it| it.as_str()) {
                Some("ident") => parent.token_trees.push(make_ident("foo")),
                Some("ty") => parent.token_trees.push(make_ident("Foo")),
                Some("tt") => parent.token_trees.push(make_ident("foo")),
                Some("vis") => parent.token_trees.push(make_ident("pub")),
                Some("pat") => parent.token_trees.push(make_ident("foo")),
                Some("path") => parent.token_trees.push(make_ident("foo")),
                Some("literal") => parent.token_trees.push(make_literal("1")),
                Some("expr") => parent.token_trees.push(make_ident("foo").into()),
                Some("lifetime") => {
                    parent.token_trees.push(make_punct('\''));
                    parent.token_trees.push(make_ident("a"));
                }
                Some("block") => {
                    parent.token_trees.push(make_subtree(tt::DelimiterKind::Brace, None))
                }
                // Smallest syntactically-plausible item: `fn foo() {}`.
                Some("item") => {
                    parent.token_trees.push(make_ident("fn"));
                    parent.token_trees.push(make_ident("foo"));
                    parent.token_trees.push(make_subtree(tt::DelimiterKind::Parenthesis, None));
                    parent.token_trees.push(make_subtree(tt::DelimiterKind::Brace, None));
                }
                Some("meta") => {
                    parent.token_trees.push(make_ident("foo"));
                    parent.token_trees.push(make_subtree(tt::DelimiterKind::Parenthesis, None));
                }

                // A metavariable without a fragment specifier contributes nothing.
                None => (),
                Some(kind) => panic!("Unhandled kind {}", kind),
            },
            // Literal tokens from the matcher are copied through verbatim.
            Op::Leaf(leaf) => parent.token_trees.push(leaf.clone().into()),
            Op::Repeat { tokens, kind, separator } => {
                // Cap repetitions at 10 to keep fixtures a manageable size.
                let max = 10;
                let cnt = match kind {
                    RepeatKind::ZeroOrMore => rand(seed) % max,
                    RepeatKind::OneOrMore => 1 + rand(seed) % max,
                    RepeatKind::ZeroOrOne => rand(seed) % 2,
                };
                for i in 0..cnt {
                    for it in tokens.iter() {
                        collect_from_op(it, parent, seed);
                    }
                    // Insert the separator between repetitions, not after the last one.
                    if i + 1 != cnt {
                        if let Some(sep) = separator {
                            match sep {
                                Separator::Literal(it) => parent
                                    .token_trees
                                    .push(tt::Leaf::Literal(it.clone().into()).into()),
                                Separator::Ident(it) => parent
                                    .token_trees
                                    .push(tt::Leaf::Ident(it.clone().into()).into()),
                                Separator::Puncts(puncts) => {
                                    for it in puncts {
                                        parent
                                            .token_trees
                                            .push(tt::Leaf::Punct(it.clone().into()).into())
                                    }
                                }
                            };
                        }
                    }
                }
            }
            // Recurse into the sub-matcher, preserving its delimiter.
            Op::Subtree { tokens, delimiter } => {
                let mut subtree =
                    tt::Subtree { delimiter: delimiter.clone(), token_trees: Vec::new() };
                tokens.iter().for_each(|it| {
                    collect_from_op(it, &mut subtree, seed);
                });
                parent.token_trees.push(subtree.into());
            }
        };

        // Simple linear congruential generator for deterministic result.
        // Wrapping arithmetic avoids overflow panics in debug builds.
        fn rand(seed: &mut usize) -> usize {
            let a = 1664525;
            let c = 1013904223;
            *seed = usize::wrapping_add(usize::wrapping_mul(*seed, a), c);
            return *seed;
        };
        // Helpers below build single tokens with unspecified (placeholder) token ids.
        fn make_ident(ident: &str) -> tt::TokenTree {
            tt::Leaf::Ident(tt::Ident { id: tt::TokenId::unspecified(), text: SmolStr::new(ident) })
                .into()
        }
        fn make_punct(char: char) -> tt::TokenTree {
            tt::Leaf::Punct(tt::Punct {
                id: tt::TokenId::unspecified(),
                char,
                spacing: tt::Spacing::Alone,
            })
            .into()
        }
        fn make_literal(lit: &str) -> tt::TokenTree {
            tt::Leaf::Literal(tt::Literal {
                id: tt::TokenId::unspecified(),
                text: SmolStr::new(lit),
            })
            .into()
        }
        // Delimited subtree, empty unless explicit contents are supplied.
        fn make_subtree(
            kind: tt::DelimiterKind,
            token_trees: Option<Vec<tt::TokenTree>>,
        ) -> tt::TokenTree {
            tt::Subtree {
                delimiter: Some(tt::Delimiter { id: tt::TokenId::unspecified(), kind }),
                token_trees: token_trees.unwrap_or_default(),
            }
            .into()
        }
    }
}
3 changes: 3 additions & 0 deletions crates/mbe/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@ mod subtree_source;
#[cfg(test)]
mod tests;

#[cfg(test)]
mod benchmark;

use std::fmt;

use test_utils::mark;
Expand Down
5 changes: 5 additions & 0 deletions crates/test_utils/src/bench_fixture.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,8 @@ pub fn glorious_old_parser() -> String {
let path = project_dir().join("bench_data/glorious_old_parser");
fs::read_to_string(&path).unwrap()
}

/// Reads the `numerous_macro_rules` benchmark fixture into a `String`.
pub fn numerous_macro_rules() -> String {
    fs::read_to_string(project_dir().join("bench_data/numerous_macro_rules")).unwrap()
}