diff --git a/.github/workflows/benchmarks-execute.yml b/.github/workflows/benchmarks-execute.yml new file mode 100644 index 0000000000..741ccdb0f1 --- /dev/null +++ b/.github/workflows/benchmarks-execute.yml @@ -0,0 +1,112 @@ +name: "benchmarks-execute" + +on: + push: + branches: ["main"] + pull_request: + types: [opened, synchronize, reopened, labeled] + branches: ["**"] + paths: + - "benchmarks/execute/**" + - "crates/circuits/**" + - "crates/toolchain/**" + - "crates/prof/**" + - "crates/sdk/**" + - "crates/vm/**" + - "extensions/**" + - "Cargo.toml" + - ".github/workflows/benchmarks-execute.yml" + workflow_dispatch: + +env: + CARGO_TERM_COLOR: always + +jobs: + execute-benchmarks: + runs-on: + - runs-on=${{ github.run_id }} + - runner=8cpu-linux-x64 + steps: + - uses: actions/checkout@v4 + + - name: Set up Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Run execution benchmarks + working-directory: benchmarks/execute + run: cargo run | tee benchmark_output.log + + - name: Parse benchmark results + working-directory: benchmarks/execute + run: | + # Determine if running in GitHub Actions environment + if [ -n "$GITHUB_STEP_SUMMARY" ]; then + SUMMARY_FILE="$GITHUB_STEP_SUMMARY" + echo "### Benchmark Results Summary" >> "$SUMMARY_FILE" + else + SUMMARY_FILE="benchmark_summary.md" + echo "### Benchmark Results Summary" > "$SUMMARY_FILE" + echo "Saving summary to $SUMMARY_FILE" + fi + + # Set up summary table header + echo "| Program | Total Time (ms) |" >> "$SUMMARY_FILE" + echo "| ------- | --------------- |" >> "$SUMMARY_FILE" + + # Variables to track current program and total time + current_program="" + total_time=0 + + # Process the output file line by line + while IFS= read -r line; do + # Check if line contains "Running program" message + if [[ $line =~ i\ \[info\]:\ Running\ program:\ ([a-zA-Z0-9_-]+) ]]; then + # If we were processing a program, output its results + if [[ -n "$current_program" 
]]; then + echo "| $current_program | $total_time |" >> "$SUMMARY_FILE" + fi + + # Start tracking new program + current_program="${BASH_REMATCH[1]}" + total_time=0 + fi + + # Check for program completion to catch programs that might have no execution segments + if [[ $line =~ i\ \[info\]:\ Completed\ program:\ ([a-zA-Z0-9_-]+) ]]; then + completed_program="${BASH_REMATCH[1]}" + # If no segments were found for this program, ensure it's still in the output + if [[ "$current_program" == "$completed_program" && $total_time == 0 ]]; then + echo "| $current_program | 0 |" >> "$SUMMARY_FILE" + current_program="" + fi + fi + + # Check if line contains execution time (looking for the format with ms or s) + if [[ $line =~ execute_segment\ \[\ ([0-9.]+)(ms|s)\ \|\ [0-9.]+%\ \]\ segment ]]; then + segment_time="${BASH_REMATCH[1]}" + unit="${BASH_REMATCH[2]}" + + # Convert to milliseconds if in seconds + if [[ "$unit" == "s" ]]; then + segment_time=$(echo "scale=6; $segment_time * 1000" | bc) + fi + + # Add segment time to total + total_time=$(echo "scale=6; $total_time + $segment_time" | bc) + fi + done < benchmark_output.log + + # Output the last program result if there was one + if [[ -n "$current_program" ]]; then + echo "| $current_program | $total_time |" >> "$SUMMARY_FILE" + fi + + # If not in GitHub Actions, print the summary to the terminal + if [ -z "$GITHUB_STEP_SUMMARY" ]; then + echo -e "\nBenchmark Summary:" + cat "$SUMMARY_FILE" + fi diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index c3c9eb8d6a..1a76f847e0 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -7,7 +7,7 @@ on: types: [opened, synchronize, reopened, labeled] branches: ["**"] paths: - - "benchmarks/**" + - "benchmarks/prove/**" - "crates/circuits/**" - "crates/toolchain/**" - "crates/prof/**" diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 9cf0443e8d..14a346441d 100644 --- a/.github/workflows/cli.yml +++ 
b/.github/workflows/cli.yml @@ -12,6 +12,7 @@ on: - "crates/cli/**" - "examples/**" - "Cargo.toml" + - ".github/workflows/cli.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} @@ -39,10 +40,6 @@ jobs: - name: Install solc # svm should support arm64 linux run: (hash svm 2>/dev/null || cargo install --version 0.2.23 svm-rs) && svm install 0.8.19 && solc --version - - name: Install tools - run: | - rustup component add rust-src --toolchain nightly-2025-02-14 - - name: Install cargo-openvm working-directory: crates/cli run: | @@ -74,3 +71,8 @@ jobs: export RUST_BACKTRACE=1 cargo build cargo run --bin cargo-openvm -- openvm keygen --config ./example/app_config.toml --output app.pk --vk-output app.vk + + - name: Run CLI tests + working-directory: crates/cli + run: | + cargo nextest run --cargo-profile=fast diff --git a/.github/workflows/lints.yml b/.github/workflows/lints.yml index 9fa9dec714..bb99d4ff4e 100644 --- a/.github/workflows/lints.yml +++ b/.github/workflows/lints.yml @@ -15,7 +15,7 @@ jobs: name: Lint runs-on: - runs-on=${{ github.run_id }} - - runner=8cpu-linux-x64 + - runner=64cpu-linux-x64 - extras=s3-cache steps: - uses: runs-on/action@v1 @@ -45,8 +45,8 @@ jobs: run: | # list of features generated using: # echo -e "\033[1;32mAll unique features across workspace:\033[0m" && cargo metadata --format-version=1 --no-deps | jq -r '.packages[].features | to_entries[] | .key' | sort -u | sed 's/^/• /' - cargo clippy --all-targets --all --tests --features "aggregation bench-metrics bls12_381 bn254 default entrypoint export-getrandom export-libm function-span getrandom halo2-compiler halo2curves heap-embedded-alloc k256 mimalloc nightly-features panic-handler parallel rust-runtime static-verifier std test-utils unstable" -- -D warnings - cargo clippy --all-targets --all --tests --no-default-features --features "jemalloc jemalloc-prof" -- -D warnings + cargo clippy --all-targets --all --tests --features "aggregation 
bench-metrics bls12_381 bn254 default entrypoint evm-prove evm-verify export-getrandom export-libm function-span getrandom halo2-compiler halo2curves heap-embedded-alloc k256 jemalloc jemalloc-prof nightly-features panic-handler parallel rust-runtime static-verifier std test-utils unstable" -- -D warnings + cargo clippy --all-targets --all --tests --no-default-features --features "mimalloc" -- -D warnings - name: Run fmt, clippy for guest run: | diff --git a/.github/workflows/native-compiler.yml b/.github/workflows/native-compiler.yml index 20bd680d6e..af4f39ddff 100644 --- a/.github/workflows/native-compiler.yml +++ b/.github/workflows/native-compiler.yml @@ -10,6 +10,7 @@ on: - "extensions/native/circuit/**" - "extensions/native/compiler/**" - "Cargo.toml" + - ".github/workflows/native-compiler.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml new file mode 100644 index 0000000000..4b54db45fe --- /dev/null +++ b/.github/workflows/pr-title.yml @@ -0,0 +1,83 @@ +# Copied from https://github.com/paradigmxyz/reth/blob/a5755f72eb54d2c88d6f0b5778413144451e1724/.github/workflows/pr-title.yml +name: Pull Request + +on: + pull_request: + types: + - opened + - reopened + - edited + - synchronize + +permissions: + pull-requests: read + contents: read + +jobs: + conventional-title: + name: Validate PR title is Conventional Commit + runs-on: + - runs-on=${{ github.run_id }} + - runner=8cpu-linux-x64 + permissions: + pull-requests: write + steps: + - name: Check title + id: lint_pr_title + uses: amannn/action-semantic-pull-request@v5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + types: | + fix + feat + chore + test + perf + refactor + docs + ci + continue-on-error: true + - name: Add PR Comment for Invalid Title + if: steps.lint_pr_title.outcome == 'failure' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: 
pr-title-lint-error + message: | + Your PR title doesn't follow the Conventional Commit guidelines. + + **Example of valid titles:** + - `feat: add new user login` + - `fix: correct button size` + - `docs: update README` + + **Usage:** + - `feat`: Introduces a new feature + - `fix`: Patches a bug + - `chore`: General maintenance tasks or updates + - `test`: Adding new tests or modifying existing tests + - `perf`: Performance improvements + - `refactor`: Changes to improve code structure + - `docs`: Documentation updates + - `ci`: Changes to CI/CD configurations + + **Breaking Changes** + + Breaking changes are noted by using an exclamation mark. For example: + - `feat!: changed the API` + - `chore(node)!: Removed unused public function` + + **Help** + + For more information, follow the guidelines here: https://www.conventionalcommits.org/en/v1.0.0/ + + - name: Remove Comment for Valid Title + if: steps.lint_pr_title.outcome == 'success' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + delete: true + + - name: Fail workflow if title invalid + if: steps.lint_pr_title.outcome == 'failure' + run: exit 1 diff --git a/.github/workflows/primitives.yml b/.github/workflows/primitives.yml index d94e1c25e1..714230b8cd 100644 --- a/.github/workflows/primitives.yml +++ b/.github/workflows/primitives.yml @@ -9,7 +9,9 @@ on: - "crates/circuits/primitives/**" - "crates/circuits/poseidon2-air/**" - "crates/circuits/sha256-air/**" + - "crates/circuits/mod-builder/**" - "Cargo.toml" + - ".github/workflows/primitives.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} @@ -49,3 +51,8 @@ jobs: working-directory: crates/circuits/sha256-air run: | cargo nextest run --cargo-profile fast --features parallel + + - name: Run tests for mod-builder + working-directory: crates/circuits/mod-builder + run: | + cargo nextest run --cargo-profile fast --features parallel diff --git 
a/.github/workflows/recursion.yml b/.github/workflows/recursion.yml index f00c8e614d..814c1fa44a 100644 --- a/.github/workflows/recursion.yml +++ b/.github/workflows/recursion.yml @@ -11,6 +11,7 @@ on: - "extensions/native/compiler/**" - "extensions/native/recursion/**" - "Cargo.toml" + - ".github/workflows/recursion.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} diff --git a/.github/workflows/sdk.yml b/.github/workflows/sdk.yml index 97033b77db..873a0c7e2b 100644 --- a/.github/workflows/sdk.yml +++ b/.github/workflows/sdk.yml @@ -10,6 +10,7 @@ on: - "crates/vm/**" - "crates/sdk/**" - "Cargo.toml" + - ".github/workflows/sdk.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} @@ -38,6 +39,9 @@ jobs: - name: Install solc # svm should support arm64 linux run: (hash svm 2>/dev/null || cargo install --version 0.2.23 svm-rs) && svm install 0.8.19 && solc --version + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + - name: Install architecture specific tools run: | arch=$(uname -m) @@ -73,8 +77,24 @@ jobs: run: | bash ../../extensions/native/recursion/trusted_setup_s3.sh + - name: Run openvm-sdk contracts/ tests + working-directory: crates/sdk/contracts + run: | + forge fmt --check + forge build --sizes + forge test + + - name: Check IOpenVmHalo2Verifier.sol abi correctness + working-directory: crates/sdk/contracts + run: | + forge build + jq -S '.abi' ./out/IOpenVmHalo2Verifier.sol/IOpenVmHalo2Verifier.json > compiled_abi.json + jq -S . 
./abi/IOpenVmHalo2Verifier.json > expected_abi_sorted.json + diff -u expected_abi_sorted.json compiled_abi.json + - name: Run openvm-sdk crate tests working-directory: crates/sdk run: | export RUST_BACKTRACE=1 cargo nextest run --cargo-profile=fast --test-threads=2 --features parallel + diff --git a/.github/workflows/vm.yml b/.github/workflows/vm.yml index 5ba2249c9d..cb7f2284ca 100644 --- a/.github/workflows/vm.yml +++ b/.github/workflows/vm.yml @@ -10,6 +10,7 @@ on: - "crates/vm/**" - "extensions/native/**" - "Cargo.toml" + - ".github/workflows/vm.yml" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} diff --git a/.gitignore b/.gitignore index 6c53953538..35f726fed6 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ rustc-* # Bench metrics .bench_metrics/ __pycache__/ +metrics.json # KZG trusted setup **/params @@ -32,3 +33,6 @@ __pycache__/ # Profiling **/flamegraph.svg **/profile.json + +# openvm generated files +crates/cli/openvm/ diff --git a/.gitmodules b/.gitmodules index ab8a22bb42..20dc59a148 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "toolchain/tests/rv32im-test-vectors/riscv-tests"] path = crates/toolchain/tests/rv32im-test-vectors/riscv-tests url = https://github.com/riscv-software-src/riscv-tests.git +[submodule "crates/sdk/contracts/lib/forge-std"] + path = crates/sdk/contracts/lib/forge-std + url = https://github.com/foundry-rs/forge-std diff --git a/Cargo.lock b/Cargo.lock index 2475913852..44bf4d329b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,17 +79,29 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-json-abi" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6beff64ad0aa6ad1019a3db26fef565aefeb011736150ab73ed3366c3cfd1b" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + [[package]] name = "alloy-primitives" -version = "0.8.21" +version = "0.8.25" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478bedf4d24e71ea48428d1bc278553bd7c6ae07c30ca063beb0b09fe58a9e74" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", - "derive_more 1.0.0", + "derive_more 2.0.1", "foldhash", "hashbrown 0.15.2", "indexmap 2.7.1", @@ -128,6 +140,80 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "alloy-sol-macro" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.7.1", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.98", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck", + "macro-string", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.98", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d162f8524adfdfb0e4bd0505c734c985f3e2474eb022af32eef0d52a4f3935c" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -1033,6 +1119,9 @@ name = "bytes" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +dependencies = [ + "serde", +] [[package]] name = "bytes-utils" @@ -1074,6 +1163,7 @@ version = "1.0.0" dependencies = [ "aws-config", "aws-sdk-s3", + "bitcode", "clap", "eyre", "hex", @@ -1406,9 +1496,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1604,7 +1694,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "derive_more-impl", + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", ] [[package]] @@ -1619,6 +1718,18 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", + "unicode-xid", +] + [[package]] name = "digest" version = "0.9.0" @@ -1657,6 +1768,12 @@ version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.18" @@ -1815,7 +1932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2313,6 +2430,7 @@ dependencies = [ "allocator-api2", "equivalent", "foldhash", + "serde", ] [[package]] @@ -2689,7 +2807,7 @@ checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2890,6 +3008,17 @@ dependencies = [ "hashbrown 0.15.2", ] +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "matchers" version = "0.1.0" @@ -3298,11 +3427,32 @@ dependencies = [ ] [[package]] -name = "openvm-benchmarks" +name = "openvm-benchmarks-execute" version = "1.0.0" dependencies = [ + "cargo-openvm", "clap", "criterion", + "derive_more 1.0.0", + "eyre", + "openvm-benchmarks-utils", + "openvm-circuit", + "openvm-keccak256-circuit", + "openvm-keccak256-transpiler", + "openvm-rv32im-circuit", + "openvm-rv32im-transpiler", + "openvm-sdk", + "openvm-stark-sdk", + "openvm-transpiler", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "openvm-benchmarks-prove" +version = "1.0.0" +dependencies = [ + "clap", "derive-new 0.6.0", "derive_more 1.0.0", "eyre", @@ -3310,7 +3460,7 @@ dependencies 
= [ "num-bigint 0.4.6", "openvm-algebra-circuit", "openvm-algebra-transpiler", - "openvm-build", + "openvm-benchmarks-utils", "openvm-circuit", "openvm-ecc-circuit", "openvm-ecc-transpiler", @@ -3329,12 +3479,25 @@ dependencies = [ "openvm-transpiler", "rand_chacha", "serde", - "tempfile", "tiny-keccak", "tokio", "tracing", ] +[[package]] +name = "openvm-benchmarks-utils" +version = "1.0.0" +dependencies = [ + "cargo_metadata", + "clap", + "eyre", + "openvm-build", + "openvm-transpiler", + "tempfile", + "tracing", + "tracing-subscriber", +] + [[package]] name = "openvm-bigint-circuit" version = "1.0.0" @@ -4037,6 +4200,8 @@ dependencies = [ name = "openvm-sdk" version = "1.0.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "async-trait", "bitcode", "bon", @@ -4045,6 +4210,7 @@ dependencies = [ "derive_more 1.0.0", "eyre", "getset", + "hex", "itertools 0.14.0", "metrics", "openvm", @@ -4075,6 +4241,9 @@ dependencies = [ "serde", "serde_json", "serde_with", + "snark-verifier", + "snark-verifier-sdk", + "tempfile", "thiserror 1.0.69", "tracing", ] @@ -4152,7 +4321,7 @@ dependencies = [ [[package]] name = "openvm-stark-backend" version = "1.0.0" -source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e" +source = "git+https://github.com/openvm-org/stark-backend.git?rev=93c0a91f3f17a21ee090d0578d45d6687fcdfc98#93c0a91f3f17a21ee090d0578d45d6687fcdfc98" dependencies = [ "bitcode", "cfg-if", @@ -4180,7 +4349,7 @@ dependencies = [ [[package]] name = "openvm-stark-sdk" version = "1.0.0" -source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e" +source = "git+https://github.com/openvm-org/stark-backend.git?rev=93c0a91f3f17a21ee090d0578d45d6687fcdfc98#93c0a91f3f17a21ee090d0578d45d6687fcdfc98" dependencies = [ "derivative", "derive_more 0.99.19", @@ -4224,7 +4393,6 @@ dependencies = [ "openvm-bigint-circuit", "openvm-build", "openvm-circuit", - 
"openvm-circuit-primitives-derive", "openvm-ecc-guest", "openvm-instructions", "openvm-platform", @@ -4956,6 +5124,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -5289,7 +5458,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5862,6 +6031,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "synstructure" version = "0.13.1" @@ -5896,7 +6077,7 @@ dependencies = [ "getrandom 0.3.1", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -6097,9 +6278,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.43.0" +version = "1.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "492a604e2fd7f814268a378409e6c92b5525d747d10db9a229723f55a417958c" dependencies = [ "backtrace", "bytes", @@ -6551,7 +6732,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 943b8d798e..65010cbe8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,9 @@ license = "MIT" [workspace] members = [ - "benchmarks", + "benchmarks/utils", + "benchmarks/execute", + "benchmarks/prove", "crates/prof", "crates/sdk", "crates/cli", @@ -105,8 +107,8 @@ lto = "thin" [workspace.dependencies] # Stark Backend -openvm-stark-backend = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.0", default-features = false } 
-openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", tag = "v1.0.0", default-features = false } +openvm-stark-backend = { git = "https://github.com/openvm-org/stark-backend.git", rev = "93c0a91f3f17a21ee090d0578d45d6687fcdfc98", default-features = false } +openvm-stark-sdk = { git = "https://github.com/openvm-org/stark-backend.git", rev = "93c0a91f3f17a21ee090d0578d45d6687fcdfc98", default-features = false } # OpenVM openvm-sdk = { path = "crates/sdk", default-features = false } @@ -127,6 +129,7 @@ openvm-custom-insn = { path = "crates/toolchain/custom_insn", default-features = openvm-circuit = { path = "crates/vm", default-features = false } openvm-circuit-derive = { path = "crates/vm/derive", default-features = false } openvm-continuations = { path = "crates/continuations", default-features = false } +cargo-openvm = { path = "crates/cli", default-features = false } # Extensions openvm-rv32im-circuit = { path = "extensions/rv32im/circuit", default-features = false } @@ -159,6 +162,9 @@ openvm-pairing-circuit = { path = "extensions/pairing/circuit", default-features openvm-pairing-transpiler = { path = "extensions/pairing/transpiler", default-features = false } openvm-pairing-guest = { path = "extensions/pairing/guest", default-features = false } +# Benchmarking +openvm-benchmarks-utils = { path = "benchmarks/utils", default-features = false } + # Plonky3 p3-field = { git = "https://github.com/Plonky3/Plonky3.git", rev = "1ba4e5c" } p3-baby-bear = { git = "https://github.com/Plonky3/Plonky3.git", features = [ @@ -178,8 +184,12 @@ snark-verifier-sdk = { version = "0.2.0", default-features = false, features = [ "loader_halo2", "halo2-axiom", ] } +snark-verifier = { version = "0.2.0", default-features = false } halo2curves-axiom = "0.7.0" +cargo_metadata = "0.18" +alloy-primitives = "0.8.25" +alloy-sol-types = "0.8.25" tracing = "0.1.40" bon = "3.2.0" serde_json = "1.0.117" diff --git a/README.md b/README.md index 1544b0ff89..07b165b290 
100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ | [User Book](https://book.openvm.dev) | [Contributor Docs](./docs) | [Crate Docs](https://docs.openvm.dev/openvm) +| [Whitepaper](https://openvm.dev/whitepaper.pdf) [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https://tg.sumanjay.workers.dev/openvm diff --git a/benchmarks/execute/Cargo.toml b/benchmarks/execute/Cargo.toml new file mode 100644 index 0000000000..319490220a --- /dev/null +++ b/benchmarks/execute/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "openvm-benchmarks-execute" +version.workspace = true +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +openvm-benchmarks-utils.workspace = true +cargo-openvm.workspace = true +openvm-circuit.workspace = true +openvm-sdk.workspace = true +openvm-stark-sdk.workspace = true +openvm-transpiler.workspace = true +openvm-rv32im-circuit.workspace = true +openvm-rv32im-transpiler.workspace = true +openvm-keccak256-circuit.workspace = true +openvm-keccak256-transpiler.workspace = true + +clap = { version = "4.5.9", features = ["derive", "env"] } +eyre.workspace = true +tracing.workspace = true +derive_more = { workspace = true, features = ["from"] } + +tracing-subscriber = { version = "0.3.17", features = ["std", "env-filter"] } + +[dev-dependencies] +criterion = { version = "0.5", features = ["html_reports"] } + +[features] +default = ["jemalloc"] +profiling = ["openvm-sdk/profiling"] +mimalloc = ["openvm-circuit/mimalloc"] +jemalloc = ["openvm-circuit/jemalloc"] +jemalloc-prof = ["openvm-circuit/jemalloc-prof"] +nightly-features = ["openvm-circuit/nightly-features"] + +[[bench]] +name = "fibonacci_execute" +harness = false + +[[bench]] +name = "regex_execute" +harness = false + +[package.metadata.cargo-shear] +ignored = ["derive_more"] diff --git a/benchmarks/benches/fibonacci_execute.rs 
b/benchmarks/execute/benches/fibonacci_execute.rs similarity index 87% rename from benchmarks/benches/fibonacci_execute.rs rename to benchmarks/execute/benches/fibonacci_execute.rs index 5450c4c749..70952b53c9 100644 --- a/benchmarks/benches/fibonacci_execute.rs +++ b/benchmarks/execute/benches/fibonacci_execute.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use openvm_benchmarks::utils::{build_bench, get_programs_dir}; +use openvm_benchmarks_utils::{build_elf, get_programs_dir}; use openvm_circuit::arch::{instructions::exe::VmExe, VmExecutor}; use openvm_rv32im_circuit::Rv32ImConfig; use openvm_rv32im_transpiler::{ @@ -10,7 +10,9 @@ use openvm_stark_sdk::p3_baby_bear::BabyBear; use openvm_transpiler::{transpiler::Transpiler, FromElf}; fn benchmark_function(c: &mut Criterion) { - let elf = build_bench(get_programs_dir().join("fibonacci"), "release").unwrap(); + let program_dir = get_programs_dir().join("fibonacci"); + let elf = build_elf(&program_dir, "release").unwrap(); + let exe = VmExe::from_elf( elf, Transpiler::::default() diff --git a/benchmarks/benches/regex_execute.rs b/benchmarks/execute/benches/regex_execute.rs similarity index 85% rename from benchmarks/benches/regex_execute.rs rename to benchmarks/execute/benches/regex_execute.rs index 08b555ec4a..a3a110e344 100644 --- a/benchmarks/benches/regex_execute.rs +++ b/benchmarks/execute/benches/regex_execute.rs @@ -1,5 +1,5 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use openvm_benchmarks::utils::{build_bench, get_programs_dir}; +use openvm_benchmarks_utils::{build_elf, get_programs_dir}; use openvm_circuit::arch::{instructions::exe::VmExe, VmExecutor}; use openvm_keccak256_circuit::Keccak256Rv32Config; use openvm_keccak256_transpiler::Keccak256TranspilerExtension; @@ -11,7 +11,9 @@ use openvm_stark_sdk::p3_baby_bear::BabyBear; use openvm_transpiler::{transpiler::Transpiler, FromElf}; fn benchmark_function(c: &mut Criterion) { - let elf = 
build_bench(get_programs_dir().join("regex"), "release").unwrap(); + let program_dir = get_programs_dir().join("regex"); + let elf = build_elf(&program_dir, "release").unwrap(); + let exe = VmExe::from_elf( elf, Transpiler::::default() @@ -27,7 +29,7 @@ fn benchmark_function(c: &mut Criterion) { let config = Keccak256Rv32Config::default(); let executor = VmExecutor::::new(config); - let data = include_str!("../programs/regex/regex_email.txt"); + let data = include_str!("../../guest/regex/regex_email.txt"); let fe_bytes = data.to_owned().into_bytes(); group.bench_function("execute", |b| { diff --git a/benchmarks/examples/regex-elf b/benchmarks/execute/examples/regex-elf similarity index 100% rename from benchmarks/examples/regex-elf rename to benchmarks/execute/examples/regex-elf diff --git a/benchmarks/examples/regex_execute.rs b/benchmarks/execute/examples/regex_execute.rs similarity index 95% rename from benchmarks/examples/regex_execute.rs rename to benchmarks/execute/examples/regex_execute.rs index ea59f0c7a1..59705a19fd 100644 --- a/benchmarks/examples/regex_execute.rs +++ b/benchmarks/execute/examples/regex_execute.rs @@ -25,7 +25,7 @@ fn main() { let config = Keccak256Rv32Config::default(); let executor = VmExecutor::::new(config); - let data = include_str!("../programs/regex/regex_email.txt"); + let data = include_str!("../../guest/regex/regex_email.txt"); let timer = std::time::Instant::now(); executor diff --git a/benchmarks/execute/src/main.rs b/benchmarks/execute/src/main.rs new file mode 100644 index 0000000000..80db3ec5a4 --- /dev/null +++ b/benchmarks/execute/src/main.rs @@ -0,0 +1,121 @@ +use cargo_openvm::{default::DEFAULT_APP_CONFIG_PATH, util::read_config_toml_or_default}; +use clap::{Parser, ValueEnum}; +use eyre::Result; +use openvm_benchmarks_utils::{get_elf_path, get_programs_dir, read_elf_file}; +use openvm_circuit::arch::{instructions::exe::VmExe, VmExecutor}; +use openvm_sdk::StdIn; +use openvm_stark_sdk::bench::run_with_metric_collection; 
+use openvm_transpiler::FromElf; + +#[derive(Debug, Clone, ValueEnum)] +enum BuildProfile { + Debug, + Release, +} + +static AVAILABLE_PROGRAMS: &[&str] = &[ + "fibonacci_recursive", + "fibonacci_iterative", + "quicksort", + "bubblesort", + "pairing", + "keccak256", + "keccak256_iter", + "sha256", + "sha256_iter", + "revm_transfer", + "revm_snailtracer", +]; + +#[derive(Parser)] +#[command(author, version, about = "OpenVM Benchmark CLI", long_about = None)] +struct Cli { + /// Programs to benchmark (if not specified, all programs will be run) + #[arg(short, long)] + programs: Vec, + + /// Programs to skip from benchmarking + #[arg(short, long)] + skip: Vec, + + /// Output path for benchmark results + #[arg(short, long, default_value = "OUTPUT_PATH")] + output: String, + + /// List available benchmark programs and exit + #[arg(short, long)] + list: bool, + + /// Verbose output + #[arg(short, long)] + verbose: bool, +} + +fn main() -> Result<()> { + let cli = Cli::parse(); + + if cli.list { + println!("Available benchmark programs:"); + for program in AVAILABLE_PROGRAMS { + println!(" {}", program); + } + return Ok(()); + } + + // Set up logging based on verbosity + if cli.verbose { + tracing_subscriber::fmt::init(); + } + + let mut programs_to_run = if cli.programs.is_empty() { + AVAILABLE_PROGRAMS.to_vec() + } else { + // Validate provided programs + for program in &cli.programs { + if !AVAILABLE_PROGRAMS.contains(&program.as_str()) { + eprintln!("Unknown program: {}", program); + eprintln!("Use --list to see available programs"); + std::process::exit(1); + } + } + cli.programs.iter().map(|s| s.as_str()).collect() + }; + + // Remove programs that should be skipped + if !cli.skip.is_empty() { + // Validate skipped programs + for program in &cli.skip { + if !AVAILABLE_PROGRAMS.contains(&program.as_str()) { + eprintln!("Unknown program to skip: {}", program); + eprintln!("Use --list to see available programs"); + std::process::exit(1); + } + } + + let skip_set: 
Vec<&str> = cli.skip.iter().map(|s| s.as_str()).collect(); + programs_to_run.retain(|&program| !skip_set.contains(&program)); + } + + tracing::info!("Starting benchmarks with metric collection"); + + run_with_metric_collection(&cli.output, || -> Result<()> { + for program in &programs_to_run { + tracing::info!("Running program: {}", program); + + let program_dir = get_programs_dir().join(program); + let elf_path = get_elf_path(&program_dir); + let elf = read_elf_file(&elf_path)?; + + let config_path = program_dir.join(DEFAULT_APP_CONFIG_PATH); + let vm_config = read_config_toml_or_default(&config_path)?.app_vm_config; + + let exe = VmExe::from_elf(elf, vm_config.transpiler())?; + + let executor = VmExecutor::new(vm_config); + executor.execute(exe, StdIn::default())?; + tracing::info!("Completed program: {}", program); + } + tracing::info!("All programs executed successfully"); + Ok(()) + }) +} diff --git a/benchmarks/programs/README.md b/benchmarks/guest/README.md similarity index 100% rename from benchmarks/programs/README.md rename to benchmarks/guest/README.md diff --git a/benchmarks/programs/base64_json/Cargo.toml b/benchmarks/guest/base64_json/Cargo.toml similarity index 100% rename from benchmarks/programs/base64_json/Cargo.toml rename to benchmarks/guest/base64_json/Cargo.toml diff --git a/benchmarks/guest/base64_json/elf/openvm-json-program.elf b/benchmarks/guest/base64_json/elf/openvm-json-program.elf new file mode 100755 index 0000000000..29e6cac131 Binary files /dev/null and b/benchmarks/guest/base64_json/elf/openvm-json-program.elf differ diff --git a/benchmarks/programs/base64_json/json_payload.txt b/benchmarks/guest/base64_json/json_payload.txt similarity index 100% rename from benchmarks/programs/base64_json/json_payload.txt rename to benchmarks/guest/base64_json/json_payload.txt diff --git a/benchmarks/programs/base64_json/json_payload_encoded.txt b/benchmarks/guest/base64_json/json_payload_encoded.txt similarity index 100% rename from 
benchmarks/programs/base64_json/json_payload_encoded.txt rename to benchmarks/guest/base64_json/json_payload_encoded.txt diff --git a/benchmarks/guest/base64_json/openvm.toml b/benchmarks/guest/base64_json/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/base64_json/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/programs/base64_json/src/lib.rs b/benchmarks/guest/base64_json/src/lib.rs similarity index 100% rename from benchmarks/programs/base64_json/src/lib.rs rename to benchmarks/guest/base64_json/src/lib.rs diff --git a/benchmarks/programs/base64_json/src/main.rs b/benchmarks/guest/base64_json/src/main.rs similarity index 100% rename from benchmarks/programs/base64_json/src/main.rs rename to benchmarks/guest/base64_json/src/main.rs diff --git a/benchmarks/programs/bincode/Cargo.toml b/benchmarks/guest/bincode/Cargo.toml similarity index 100% rename from benchmarks/programs/bincode/Cargo.toml rename to benchmarks/guest/bincode/Cargo.toml diff --git a/benchmarks/guest/bincode/elf/openvm-bincode-program.elf b/benchmarks/guest/bincode/elf/openvm-bincode-program.elf new file mode 100755 index 0000000000..085eb7ee4f Binary files /dev/null and b/benchmarks/guest/bincode/elf/openvm-bincode-program.elf differ diff --git a/benchmarks/programs/bincode/minecraft_savedata.bin b/benchmarks/guest/bincode/minecraft_savedata.bin similarity index 100% rename from benchmarks/programs/bincode/minecraft_savedata.bin rename to benchmarks/guest/bincode/minecraft_savedata.bin diff --git a/benchmarks/guest/bincode/openvm.toml b/benchmarks/guest/bincode/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/bincode/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/programs/bincode/src/generate.rs b/benchmarks/guest/bincode/src/generate.rs similarity index 100% 
rename from benchmarks/programs/bincode/src/generate.rs rename to benchmarks/guest/bincode/src/generate.rs diff --git a/benchmarks/programs/bincode/src/main.rs b/benchmarks/guest/bincode/src/main.rs similarity index 100% rename from benchmarks/programs/bincode/src/main.rs rename to benchmarks/guest/bincode/src/main.rs diff --git a/benchmarks/programs/bincode/src/types.rs b/benchmarks/guest/bincode/src/types.rs similarity index 100% rename from benchmarks/programs/bincode/src/types.rs rename to benchmarks/guest/bincode/src/types.rs diff --git a/benchmarks/guest/bubblesort/Cargo.toml b/benchmarks/guest/bubblesort/Cargo.toml new file mode 100644 index 0000000000..68a0af82ff --- /dev/null +++ b/benchmarks/guest/bubblesort/Cargo.toml @@ -0,0 +1,16 @@ +[workspace] +[package] +name = "openvm-bubblesort-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/bubblesort/elf/openvm-bubblesort-program.elf b/benchmarks/guest/bubblesort/elf/openvm-bubblesort-program.elf new file mode 100755 index 0000000000..0f81a3926f Binary files /dev/null and b/benchmarks/guest/bubblesort/elf/openvm-bubblesort-program.elf differ diff --git a/benchmarks/guest/bubblesort/openvm.toml b/benchmarks/guest/bubblesort/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/bubblesort/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/guest/bubblesort/src/main.rs b/benchmarks/guest/bubblesort/src/main.rs new file mode 100644 index 0000000000..0dd7e51146 --- /dev/null +++ b/benchmarks/guest/bubblesort/src/main.rs @@ -0,0 +1,46 @@ +use core::hint::black_box; +use openvm as _; + +const ARRAY_SIZE: usize = 100; + +fn bubblesort(arr: &mut [T]) { + let len = arr.len(); + if len <= 1 { + 
return; + } + + for i in 0..len { + let mut swapped = false; + for j in 0..len - i - 1 { + if arr[j] > arr[j + 1] { + arr.swap(j, j + 1); + swapped = true; + } + } + // If no swapping occurred in this pass, array is sorted + if !swapped { + break; + } + } +} + +pub fn main() { + // Generate array of random-like values + let mut arr = Vec::with_capacity(ARRAY_SIZE); + + // Initialize with pseudo-random values + let mut val = 1; + for _ in 0..ARRAY_SIZE { + arr.push(val); + val = ((val * 8191) << 7) ^ val; + } + + // Prevent compiler from optimizing away the computation + let mut input = black_box(arr); + + // Sort the array + bubblesort(&mut input); + + // Prevent compiler from optimizing away the computation + black_box(&input); +} diff --git a/benchmarks/programs/ecrecover/Cargo.toml b/benchmarks/guest/ecrecover/Cargo.toml similarity index 100% rename from benchmarks/programs/ecrecover/Cargo.toml rename to benchmarks/guest/ecrecover/Cargo.toml diff --git a/benchmarks/guest/ecrecover/elf/openvm-ecdsa-recover-key-program.elf b/benchmarks/guest/ecrecover/elf/openvm-ecdsa-recover-key-program.elf new file mode 100755 index 0000000000..4e54268ea4 Binary files /dev/null and b/benchmarks/guest/ecrecover/elf/openvm-ecdsa-recover-key-program.elf differ diff --git a/benchmarks/programs/ecrecover/src/main.rs b/benchmarks/guest/ecrecover/src/main.rs similarity index 100% rename from benchmarks/programs/ecrecover/src/main.rs rename to benchmarks/guest/ecrecover/src/main.rs diff --git a/benchmarks/programs/fibonacci/Cargo.toml b/benchmarks/guest/fibonacci/Cargo.toml similarity index 100% rename from benchmarks/programs/fibonacci/Cargo.toml rename to benchmarks/guest/fibonacci/Cargo.toml diff --git a/benchmarks/guest/fibonacci/elf/openvm-fibonacci-program.elf b/benchmarks/guest/fibonacci/elf/openvm-fibonacci-program.elf new file mode 100755 index 0000000000..36ad8d359c Binary files /dev/null and b/benchmarks/guest/fibonacci/elf/openvm-fibonacci-program.elf differ diff --git 
a/benchmarks/guest/fibonacci/openvm.toml b/benchmarks/guest/fibonacci/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/fibonacci/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/programs/fibonacci/src/main.rs b/benchmarks/guest/fibonacci/src/main.rs similarity index 100% rename from benchmarks/programs/fibonacci/src/main.rs rename to benchmarks/guest/fibonacci/src/main.rs diff --git a/benchmarks/guest/fibonacci_iterative/Cargo.toml b/benchmarks/guest/fibonacci_iterative/Cargo.toml new file mode 100644 index 0000000000..6f0c145061 --- /dev/null +++ b/benchmarks/guest/fibonacci_iterative/Cargo.toml @@ -0,0 +1,16 @@ +[workspace] +[package] +name = "openvm-fibonacci-iterative-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/fibonacci_iterative/elf/openvm-fibonacci-iterative-program.elf b/benchmarks/guest/fibonacci_iterative/elf/openvm-fibonacci-iterative-program.elf new file mode 100755 index 0000000000..ac9fbf3e89 Binary files /dev/null and b/benchmarks/guest/fibonacci_iterative/elf/openvm-fibonacci-iterative-program.elf differ diff --git a/benchmarks/guest/fibonacci_iterative/openvm.toml b/benchmarks/guest/fibonacci_iterative/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/fibonacci_iterative/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/guest/fibonacci_iterative/src/main.rs b/benchmarks/guest/fibonacci_iterative/src/main.rs new file mode 100644 index 0000000000..09ceb5df41 --- /dev/null +++ b/benchmarks/guest/fibonacci_iterative/src/main.rs @@ -0,0 +1,15 @@ +use core::hint::black_box; +use openvm as _; + +const 
N: u64 = 100_000; + +pub fn main() { + let mut a: u64 = 0; + let mut b: u64 = 1; + for _ in 0..black_box(N) { + let c: u64 = a.wrapping_add(b); + a = b; + b = c; + } + black_box(a); +} diff --git a/benchmarks/guest/fibonacci_recursive/Cargo.toml b/benchmarks/guest/fibonacci_recursive/Cargo.toml new file mode 100644 index 0000000000..95b124df43 --- /dev/null +++ b/benchmarks/guest/fibonacci_recursive/Cargo.toml @@ -0,0 +1,16 @@ +[workspace] +[package] +name = "openvm-fibonacci-recursive-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/fibonacci_recursive/elf/openvm-fibonacci-recursive-program.elf b/benchmarks/guest/fibonacci_recursive/elf/openvm-fibonacci-recursive-program.elf new file mode 100755 index 0000000000..7dee9d4286 Binary files /dev/null and b/benchmarks/guest/fibonacci_recursive/elf/openvm-fibonacci-recursive-program.elf differ diff --git a/benchmarks/guest/fibonacci_recursive/openvm.toml b/benchmarks/guest/fibonacci_recursive/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/fibonacci_recursive/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/guest/fibonacci_recursive/src/main.rs b/benchmarks/guest/fibonacci_recursive/src/main.rs new file mode 100644 index 0000000000..fae64a1b0f --- /dev/null +++ b/benchmarks/guest/fibonacci_recursive/src/main.rs @@ -0,0 +1,21 @@ +use core::hint::black_box; +use openvm as _; + +const N: u64 = 25; + +pub fn main() { + let n = black_box(N); + black_box(fibonacci(n)); +} + +fn fibonacci(n: u64) -> u64 { + if n == 0 { + 0 + } else if n == 1 { + 1 + } else { + let a = fibonacci(n - 2); + let b = fibonacci(n - 1); + a.wrapping_add(b) + } +} diff --git a/benchmarks/guest/keccak256/Cargo.toml 
b/benchmarks/guest/keccak256/Cargo.toml new file mode 100644 index 0000000000..d14a60111e --- /dev/null +++ b/benchmarks/guest/keccak256/Cargo.toml @@ -0,0 +1,17 @@ +[workspace] +[package] +name = "openvm-keccak256-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } +openvm-keccak256-guest = { path = "../../../extensions/keccak256/guest" } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/keccak256/elf/openvm-keccak256-program.elf b/benchmarks/guest/keccak256/elf/openvm-keccak256-program.elf new file mode 100755 index 0000000000..7425897f99 Binary files /dev/null and b/benchmarks/guest/keccak256/elf/openvm-keccak256-program.elf differ diff --git a/benchmarks/guest/keccak256/openvm.toml b/benchmarks/guest/keccak256/openvm.toml new file mode 100644 index 0000000000..93998f52a3 --- /dev/null +++ b/benchmarks/guest/keccak256/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.keccak] diff --git a/benchmarks/guest/keccak256/src/main.rs b/benchmarks/guest/keccak256/src/main.rs new file mode 100644 index 0000000000..ee7ec8b09a --- /dev/null +++ b/benchmarks/guest/keccak256/src/main.rs @@ -0,0 +1,21 @@ +use core::hint::black_box; +use openvm as _; + +use openvm_keccak256_guest::keccak256; + +const INPUT_LENGTH_BYTES: usize = 100 * 1024; // 100 KB + +pub fn main() { + let mut input = Vec::with_capacity(INPUT_LENGTH_BYTES); + + // Initialize with pseudo-random values + let mut val: u64 = 1; + for _ in 0..INPUT_LENGTH_BYTES { + input.push(val as u8); + val = ((val.wrapping_mul(8191)) << 7) ^ val; + } + + // Prevent optimizer from optimizing away the computation + let input = black_box(input); + black_box(keccak256(&input)); +} diff --git a/benchmarks/guest/keccak256_iter/Cargo.toml b/benchmarks/guest/keccak256_iter/Cargo.toml new file mode 100644 index 
0000000000..ae9291be0c --- /dev/null +++ b/benchmarks/guest/keccak256_iter/Cargo.toml @@ -0,0 +1,17 @@ +[workspace] +[package] +name = "openvm-keccak256-iter-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } +openvm-keccak256-guest = { path = "../../../extensions/keccak256/guest" } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/keccak256_iter/elf/openvm-keccak256-iter-program.elf b/benchmarks/guest/keccak256_iter/elf/openvm-keccak256-iter-program.elf new file mode 100755 index 0000000000..0cf372eec3 Binary files /dev/null and b/benchmarks/guest/keccak256_iter/elf/openvm-keccak256-iter-program.elf differ diff --git a/benchmarks/guest/keccak256_iter/openvm.toml b/benchmarks/guest/keccak256_iter/openvm.toml new file mode 100644 index 0000000000..93998f52a3 --- /dev/null +++ b/benchmarks/guest/keccak256_iter/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.keccak] diff --git a/benchmarks/guest/keccak256_iter/src/main.rs b/benchmarks/guest/keccak256_iter/src/main.rs new file mode 100644 index 0000000000..7ef36a5fa7 --- /dev/null +++ b/benchmarks/guest/keccak256_iter/src/main.rs @@ -0,0 +1,19 @@ +use core::hint::black_box; +use openvm as _; + +use openvm_keccak256_guest::keccak256; + +const ITERATIONS: usize = 10_000; + +pub fn main() { + // Initialize with hash of an empty vector + let mut hash = black_box(keccak256(&vec![])); + + // Iteratively apply keccak256 + for _ in 0..ITERATIONS { + hash = keccak256(&hash); + } + + // Prevent optimizer from optimizing away the computation + black_box(hash); +} diff --git a/benchmarks/programs/kitchen-sink/Cargo.toml b/benchmarks/guest/kitchen-sink/Cargo.toml similarity index 100% rename from benchmarks/programs/kitchen-sink/Cargo.toml rename to benchmarks/guest/kitchen-sink/Cargo.toml diff --git 
a/benchmarks/guest/kitchen-sink/elf/openvm-kitchen-sink-program.elf b/benchmarks/guest/kitchen-sink/elf/openvm-kitchen-sink-program.elf new file mode 100755 index 0000000000..85f3509fa5 Binary files /dev/null and b/benchmarks/guest/kitchen-sink/elf/openvm-kitchen-sink-program.elf differ diff --git a/benchmarks/programs/kitchen-sink/src/main.rs b/benchmarks/guest/kitchen-sink/src/main.rs similarity index 98% rename from benchmarks/programs/kitchen-sink/src/main.rs rename to benchmarks/guest/kitchen-sink/src/main.rs index 8c12e165f7..6aa679eb3f 100644 --- a/benchmarks/programs/kitchen-sink/src/main.rs +++ b/benchmarks/guest/kitchen-sink/src/main.rs @@ -1,4 +1,4 @@ -use std::{hint::black_box, mem::transmute}; +use std::hint::black_box; use openvm_algebra_guest::IntMod; use openvm_bigint_guest::I256; diff --git a/benchmarks/programs/pairing/Cargo.toml b/benchmarks/guest/pairing/Cargo.toml similarity index 100% rename from benchmarks/programs/pairing/Cargo.toml rename to benchmarks/guest/pairing/Cargo.toml diff --git a/benchmarks/guest/pairing/elf/openvm-pairing-program.elf b/benchmarks/guest/pairing/elf/openvm-pairing-program.elf new file mode 100755 index 0000000000..bf30d5a003 Binary files /dev/null and b/benchmarks/guest/pairing/elf/openvm-pairing-program.elf differ diff --git a/benchmarks/guest/pairing/openvm.toml b/benchmarks/guest/pairing/openvm.toml new file mode 100644 index 0000000000..4b2dc738b3 --- /dev/null +++ b/benchmarks/guest/pairing/openvm.toml @@ -0,0 +1,25 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.modular] +supported_modulus = [ + # bn254 (alt bn128) + "21888242871839275222246405745257275088696311157297823662689037894645226208583", # coordinate field + "21888242871839275222246405745257275088548364400416034343698204186575808495617", # scalar field +] + +[app_vm_config.fp2] +supported_modulus = [ + # bn254 (alt bn128) + "21888242871839275222246405745257275088696311157297823662689037894645226208583", +] + 
+[app_vm_config.pairing] +supported_curves = ["Bn254"] + +# bn254 (alt bn128) +[[app_vm_config.ecc.supported_curves]] +modulus = "21888242871839275222246405745257275088696311157297823662689037894645226208583" +scalar = "21888242871839275222246405745257275088548364400416034343698204186575808495617" +a = "0" +b = "3" diff --git a/benchmarks/programs/pairing/src/main.rs b/benchmarks/guest/pairing/src/main.rs similarity index 100% rename from benchmarks/programs/pairing/src/main.rs rename to benchmarks/guest/pairing/src/main.rs diff --git a/benchmarks/guest/quicksort/Cargo.toml b/benchmarks/guest/quicksort/Cargo.toml new file mode 100644 index 0000000000..8556264be0 --- /dev/null +++ b/benchmarks/guest/quicksort/Cargo.toml @@ -0,0 +1,16 @@ +[workspace] +[package] +name = "openvm-quicksort-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/quicksort/elf/openvm-quicksort-program.elf b/benchmarks/guest/quicksort/elf/openvm-quicksort-program.elf new file mode 100755 index 0000000000..54af6272d6 Binary files /dev/null and b/benchmarks/guest/quicksort/elf/openvm-quicksort-program.elf differ diff --git a/benchmarks/guest/quicksort/openvm.toml b/benchmarks/guest/quicksort/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/quicksort/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/guest/quicksort/src/main.rs b/benchmarks/guest/quicksort/src/main.rs new file mode 100644 index 0000000000..30218cf40e --- /dev/null +++ b/benchmarks/guest/quicksort/src/main.rs @@ -0,0 +1,64 @@ +use core::hint::black_box; +use openvm as _; + +const ARRAY_SIZE: usize = 1_000; + +fn quicksort(arr: &mut [T]) { + if arr.len() <= 1 { + return; + } + + let pivot_index = 
partition(arr); + + // Sort left part + quicksort(&mut arr[0..pivot_index]); + // Sort right part + quicksort(&mut arr[pivot_index + 1..]); +} + +fn partition(arr: &mut [T]) -> usize { + let len = arr.len(); + if len <= 1 { + return 0; + } + + // Choose pivot (middle element) + let pivot_index = len / 2; + + // Move pivot to the end + arr.swap(pivot_index, len - 1); + + // Partition + let mut store_index = 0; + for i in 0..len - 1 { + if arr[i] < arr[len - 1] { + arr.swap(i, store_index); + store_index += 1; + } + } + + // Move pivot to its final place + arr.swap(store_index, len - 1); + store_index +} + +pub fn main() { + // Generate array of random-like values + let mut arr = Vec::with_capacity(ARRAY_SIZE); + + // Initialize with pseudo-random values + let mut val = 1; + for _ in 0..ARRAY_SIZE { + arr.push(val); + val = ((val * 8191) << 7) ^ val; + } + + // Prevent compiler from optimizing away the computation + let mut input = black_box(arr); + + // Sort the array + quicksort(&mut input); + + // Prevent compiler from optimizing away the computation + black_box(&input); +} diff --git a/benchmarks/programs/regex/Cargo.toml b/benchmarks/guest/regex/Cargo.toml similarity index 100% rename from benchmarks/programs/regex/Cargo.toml rename to benchmarks/guest/regex/Cargo.toml diff --git a/benchmarks/guest/regex/elf/openvm-regex-program.elf b/benchmarks/guest/regex/elf/openvm-regex-program.elf new file mode 100755 index 0000000000..6e6074e079 Binary files /dev/null and b/benchmarks/guest/regex/elf/openvm-regex-program.elf differ diff --git a/benchmarks/guest/regex/openvm.toml b/benchmarks/guest/regex/openvm.toml new file mode 100644 index 0000000000..93998f52a3 --- /dev/null +++ b/benchmarks/guest/regex/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.keccak] diff --git a/benchmarks/programs/regex/regex_email.txt b/benchmarks/guest/regex/regex_email.txt similarity index 100% rename from 
benchmarks/programs/regex/regex_email.txt rename to benchmarks/guest/regex/regex_email.txt diff --git a/benchmarks/programs/regex/src/main.rs b/benchmarks/guest/regex/src/main.rs similarity index 100% rename from benchmarks/programs/regex/src/main.rs rename to benchmarks/guest/regex/src/main.rs diff --git a/benchmarks/guest/revm_snailtracer/Cargo.toml b/benchmarks/guest/revm_snailtracer/Cargo.toml new file mode 100644 index 0000000000..e37595eb36 --- /dev/null +++ b/benchmarks/guest/revm_snailtracer/Cargo.toml @@ -0,0 +1,22 @@ +[workspace] +[package] +name = "openvm-revm-snailtracer" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } +revm = { version = "18.0.0", default-features = false } +# revm does not re-export this feature so we enable it here +derive_more = { version = "1.0.0", default-features = false, features = [ + "from", + "display", +] } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/revm_snailtracer/elf/openvm-revm-snailtracer.elf b/benchmarks/guest/revm_snailtracer/elf/openvm-revm-snailtracer.elf new file mode 100755 index 0000000000..9255290412 Binary files /dev/null and b/benchmarks/guest/revm_snailtracer/elf/openvm-revm-snailtracer.elf differ diff --git a/benchmarks/guest/revm_snailtracer/openvm.toml b/benchmarks/guest/revm_snailtracer/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/revm_snailtracer/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/guest/revm_snailtracer/src/main.rs b/benchmarks/guest/revm_snailtracer/src/main.rs new file mode 100644 index 0000000000..a266ec21c4 --- /dev/null +++ b/benchmarks/guest/revm_snailtracer/src/main.rs @@ -0,0 +1,25 @@ +use openvm as _; + +use revm::{ + db::BenchmarkDB, + primitives::{address, bytes, hex, Bytecode, Bytes, TxKind}, 
+ Evm, +}; + +const BYTES: &str = include_str!("snailtracer.hex"); + +fn main() { + let bytecode = Bytecode::new_raw(Bytes::from(hex::decode(BYTES).unwrap())); + + let mut evm = Evm::builder() + .with_db(BenchmarkDB::new_bytecode(bytecode.clone())) + .modify_tx_env(|tx| { + tx.caller = address!("0000000000000000000000000000000000000001"); + tx.transact_to = TxKind::Call(address!("0000000000000000000000000000000000000000")); + tx.data = bytes!("30627b7c"); + tx.gas_limit = 1_000_000_000; + }) + .build(); + + evm.transact().unwrap(); +} diff --git a/benchmarks/guest/revm_snailtracer/src/snailtracer.hex b/benchmarks/guest/revm_snailtracer/src/snailtracer.hex new file mode 100644 index 0000000000..e69de29bb2 diff --git a/benchmarks/programs/revm_transfer/Cargo.toml b/benchmarks/guest/revm_transfer/Cargo.toml similarity index 91% rename from benchmarks/programs/revm_transfer/Cargo.toml rename to benchmarks/guest/revm_transfer/Cargo.toml index 275d5ee93b..eea02dd155 100644 --- a/benchmarks/programs/revm_transfer/Cargo.toml +++ b/benchmarks/guest/revm_transfer/Cargo.toml @@ -11,7 +11,8 @@ openvm-keccak256-guest = { path = "../../../extensions/keccak256/guest", default tracing = { version = "0.1.40", default-features = false } alloy-primitives = { version = "0.8.10", default-features = false, features = [ "native-keccak", -] } # revm does not re-export this feature so we enable it here +] } +# revm does not re-export this feature so we enable it here derive_more = { version = "1.0.0", default-features = false, features = [ "from", "display", diff --git a/benchmarks/guest/revm_transfer/elf/openvm-revm-transfer.elf b/benchmarks/guest/revm_transfer/elf/openvm-revm-transfer.elf new file mode 100755 index 0000000000..0aa22396e6 Binary files /dev/null and b/benchmarks/guest/revm_transfer/elf/openvm-revm-transfer.elf differ diff --git a/benchmarks/guest/revm_transfer/openvm.toml b/benchmarks/guest/revm_transfer/openvm.toml new file mode 100644 index 0000000000..93998f52a3 --- 
/dev/null +++ b/benchmarks/guest/revm_transfer/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.keccak] diff --git a/benchmarks/programs/revm_transfer/src/main.rs b/benchmarks/guest/revm_transfer/src/main.rs similarity index 100% rename from benchmarks/programs/revm_transfer/src/main.rs rename to benchmarks/guest/revm_transfer/src/main.rs diff --git a/benchmarks/programs/rkyv/Cargo.toml b/benchmarks/guest/rkyv/Cargo.toml similarity index 100% rename from benchmarks/programs/rkyv/Cargo.toml rename to benchmarks/guest/rkyv/Cargo.toml diff --git a/benchmarks/guest/rkyv/elf/openvm-rkyv-program.elf b/benchmarks/guest/rkyv/elf/openvm-rkyv-program.elf new file mode 100755 index 0000000000..528106e233 Binary files /dev/null and b/benchmarks/guest/rkyv/elf/openvm-rkyv-program.elf differ diff --git a/benchmarks/programs/rkyv/minecraft_savedata.bin b/benchmarks/guest/rkyv/minecraft_savedata.bin similarity index 100% rename from benchmarks/programs/rkyv/minecraft_savedata.bin rename to benchmarks/guest/rkyv/minecraft_savedata.bin diff --git a/benchmarks/guest/rkyv/openvm.toml b/benchmarks/guest/rkyv/openvm.toml new file mode 100644 index 0000000000..19a1e670e5 --- /dev/null +++ b/benchmarks/guest/rkyv/openvm.toml @@ -0,0 +1,3 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] diff --git a/benchmarks/programs/rkyv/src/generate.rs b/benchmarks/guest/rkyv/src/generate.rs similarity index 100% rename from benchmarks/programs/rkyv/src/generate.rs rename to benchmarks/guest/rkyv/src/generate.rs diff --git a/benchmarks/programs/rkyv/src/main.rs b/benchmarks/guest/rkyv/src/main.rs similarity index 100% rename from benchmarks/programs/rkyv/src/main.rs rename to benchmarks/guest/rkyv/src/main.rs diff --git a/benchmarks/programs/rkyv/src/types.rs b/benchmarks/guest/rkyv/src/types.rs similarity index 100% rename from benchmarks/programs/rkyv/src/types.rs rename to benchmarks/guest/rkyv/src/types.rs diff --git 
a/benchmarks/guest/sha256/Cargo.toml b/benchmarks/guest/sha256/Cargo.toml new file mode 100644 index 0000000000..6526600801 --- /dev/null +++ b/benchmarks/guest/sha256/Cargo.toml @@ -0,0 +1,17 @@ +[workspace] +[package] +name = "openvm-sha256-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } +openvm-sha256-guest = { path = "../../../extensions/sha256/guest" } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/sha256/elf/openvm-sha256-program.elf b/benchmarks/guest/sha256/elf/openvm-sha256-program.elf new file mode 100755 index 0000000000..9524e8f552 Binary files /dev/null and b/benchmarks/guest/sha256/elf/openvm-sha256-program.elf differ diff --git a/benchmarks/guest/sha256/openvm.toml b/benchmarks/guest/sha256/openvm.toml new file mode 100644 index 0000000000..656bf52414 --- /dev/null +++ b/benchmarks/guest/sha256/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.sha256] diff --git a/benchmarks/guest/sha256/src/main.rs b/benchmarks/guest/sha256/src/main.rs new file mode 100644 index 0000000000..2c22b2e369 --- /dev/null +++ b/benchmarks/guest/sha256/src/main.rs @@ -0,0 +1,20 @@ +use core::hint::black_box; +use openvm as _; + +use openvm_sha256_guest::sha256; + +const INPUT_LENGTH_BYTES: usize = 100 * 1024; // 100 KB + +pub fn main() { + let mut input = Vec::with_capacity(INPUT_LENGTH_BYTES); + + // Initialize with pseudo-random values + let mut val: u64 = 1; + for _ in 0..INPUT_LENGTH_BYTES { + input.push(val as u8); + val = ((val.wrapping_mul(8191)) << 7) ^ val; + } + + // Prevent optimizer from optimizing away the computation + black_box(sha256(&black_box(input))); +} diff --git a/benchmarks/guest/sha256_iter/Cargo.toml b/benchmarks/guest/sha256_iter/Cargo.toml new file mode 100644 index 0000000000..ee83fed323 --- /dev/null +++ 
b/benchmarks/guest/sha256_iter/Cargo.toml @@ -0,0 +1,17 @@ +[workspace] +[package] +name = "openvm-sha256-iter-program" +version = "0.0.0" +edition = "2021" + +[dependencies] +openvm = { path = "../../../crates/toolchain/openvm", features = ["std"] } +openvm-sha256-guest = { path = "../../../extensions/sha256/guest" } + +[features] +default = [] + +[profile.profiling] +inherits = "release" +debug = 2 +strip = false diff --git a/benchmarks/guest/sha256_iter/elf/openvm-sha256-iter-program.elf b/benchmarks/guest/sha256_iter/elf/openvm-sha256-iter-program.elf new file mode 100755 index 0000000000..95b469ece5 Binary files /dev/null and b/benchmarks/guest/sha256_iter/elf/openvm-sha256-iter-program.elf differ diff --git a/benchmarks/guest/sha256_iter/openvm.toml b/benchmarks/guest/sha256_iter/openvm.toml new file mode 100644 index 0000000000..656bf52414 --- /dev/null +++ b/benchmarks/guest/sha256_iter/openvm.toml @@ -0,0 +1,4 @@ +[app_vm_config.rv32i] +[app_vm_config.rv32m] +[app_vm_config.io] +[app_vm_config.sha256] diff --git a/benchmarks/guest/sha256_iter/src/main.rs b/benchmarks/guest/sha256_iter/src/main.rs new file mode 100644 index 0000000000..7d0d23dd7f --- /dev/null +++ b/benchmarks/guest/sha256_iter/src/main.rs @@ -0,0 +1,19 @@ +use core::hint::black_box; +use openvm as _; + +use openvm_sha256_guest::sha256; + +const ITERATIONS: usize = 20_000; + +pub fn main() { + // Initialize with hash of an empty vector + let mut hash = black_box(sha256(&vec![])); + + // Iteratively apply sha256 + for _ in 0..ITERATIONS { + hash = sha256(&hash); + } + + // Prevent optimizer from optimizing away the computation + black_box(hash); +} diff --git a/benchmarks/Cargo.toml b/benchmarks/prove/Cargo.toml similarity index 77% rename from benchmarks/Cargo.toml rename to benchmarks/prove/Cargo.toml index f4e0415f36..7809fda948 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/prove/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "openvm-benchmarks" +name = "openvm-benchmarks-prove" 
version.workspace = true authors.workspace = true edition.workspace = true @@ -8,9 +8,9 @@ repository.workspace = true license.workspace = true [dependencies] -openvm-build.workspace = true +openvm-benchmarks-utils.workspace = true openvm-circuit.workspace = true -openvm-sdk.workspace = true +openvm-sdk = { workspace = true, features = ["evm-verify"] } openvm-stark-backend.workspace = true openvm-stark-sdk.workspace = true openvm-transpiler.workspace = true @@ -30,9 +30,7 @@ openvm-native-recursion = { workspace = true, features = ["test-utils"] } clap = { version = "4.5.9", features = ["derive", "env"] } eyre.workspace = true -tempfile.workspace = true -tracing.workspace = true -tokio = { version = "1.41.1", features = ["rt", "rt-multi-thread", "macros"] } +tokio = { version = "1.43.1", features = ["rt", "rt-multi-thread", "macros"] } rand_chacha = { version = "0.3", default-features = false } k256 = { workspace = true, features = ["ecdsa"] } tiny-keccak.workspace = true @@ -40,29 +38,20 @@ derive-new.workspace = true derive_more = { workspace = true, features = ["from"] } num-bigint = { workspace = true, features = ["std", "serde"] } serde.workspace = true +tracing.workspace = true [dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } [features] -default = ["parallel", "mimalloc", "bench-metrics"] +default = ["parallel", "jemalloc", "bench-metrics"] bench-metrics = ["openvm-native-recursion/bench-metrics"] profiling = ["openvm-sdk/profiling"] aggregation = [] -static-verifier = ["openvm-native-recursion/static-verifier"] parallel = ["openvm-native-recursion/parallel"] mimalloc = ["openvm-circuit/mimalloc"] jemalloc = ["openvm-circuit/jemalloc"] jemalloc-prof = ["openvm-circuit/jemalloc-prof"] nightly-features = ["openvm-circuit/nightly-features"] -[[bench]] -name = "fibonacci_execute" -harness = false - -[[bench]] -name = "regex_execute" -harness = false - -[[bin]] -name = "fib_e2e" +[package.metadata.cargo-shear] +ignored = 
["derive_more"] diff --git a/benchmarks/src/bin/base64_json.rs b/benchmarks/prove/src/bin/base64_json.rs similarity index 91% rename from benchmarks/src/bin/base64_json.rs rename to benchmarks/prove/src/bin/base64_json.rs index 1f0c99f082..3c6f6e6d14 100644 --- a/benchmarks/src/bin/base64_json.rs +++ b/benchmarks/prove/src/bin/base64_json.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_keccak256_circuit::Keccak256Rv32Config; use openvm_keccak256_transpiler::Keccak256TranspilerExtension; @@ -25,7 +25,7 @@ fn main() -> Result<()> { )?; run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - let data = include_str!("../../programs/base64_json/json_payload_encoded.txt"); + let data = include_str!("../../../guest/base64_json/json_payload_encoded.txt"); let fe_bytes = data.to_owned().into_bytes(); args.bench_from_exe( diff --git a/benchmarks/src/bin/bincode.rs b/benchmarks/prove/src/bin/bincode.rs similarity index 89% rename from benchmarks/src/bin/bincode.rs rename to benchmarks/prove/src/bin/bincode.rs index 938a5aa072..c1add70344 100644 --- a/benchmarks/src/bin/bincode.rs +++ b/benchmarks/prove/src/bin/bincode.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_rv32im_circuit::Rv32ImConfig; use openvm_rv32im_transpiler::{ @@ -22,7 +22,7 @@ fn main() -> Result<()> { .with_extension(Rv32IoTranspilerExtension), )?; run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - let file_data = include_bytes!("../../programs/bincode/minecraft_savedata.bin"); + let file_data = include_bytes!("../../../guest/bincode/minecraft_savedata.bin"); let stdin = StdIn::from_bytes(file_data); args.bench_from_exe("bincode", Rv32ImConfig::default(), exe, 
stdin) }) diff --git a/benchmarks/src/bin/ecrecover.rs b/benchmarks/prove/src/bin/ecrecover.rs similarity index 93% rename from benchmarks/src/bin/ecrecover.rs rename to benchmarks/prove/src/bin/ecrecover.rs index 1351dfb7ba..2a2f146f4c 100644 --- a/benchmarks/src/bin/ecrecover.rs +++ b/benchmarks/prove/src/bin/ecrecover.rs @@ -1,5 +1,4 @@ use clap::Parser; -use derive_more::derive::From; use eyre::Result; use k256::ecdsa::{SigningKey, VerifyingKey}; use num_bigint::BigUint; @@ -7,14 +6,10 @@ use openvm_algebra_circuit::{ ModularExtension, ModularExtensionExecutor, ModularExtensionPeriphery, }; use openvm_algebra_transpiler::ModularTranspilerExtension; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::{ - arch::{ - instructions::exe::VmExe, SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, - VmConfig, VmInventoryError, - }, - circuit_derive::{Chip, ChipUsageGetter}, - derive::{AnyEnum, InstructionExecutor, VmConfig}, + arch::{instructions::exe::VmExe, SystemConfig}, + derive::VmConfig, }; use openvm_ecc_circuit::{ CurveConfig, WeierstrassExtension, WeierstrassExtensionExecutor, WeierstrassExtensionPeriphery, diff --git a/benchmarks/src/bin/fib_e2e.rs b/benchmarks/prove/src/bin/fib_e2e.rs similarity index 80% rename from benchmarks/src/bin/fib_e2e.rs rename to benchmarks/prove/src/bin/fib_e2e.rs index 63c327ddec..6e1cfa7a35 100644 --- a/benchmarks/src/bin/fib_e2e.rs +++ b/benchmarks/prove/src/bin/fib_e2e.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, sync::Arc}; use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::{exe::VmExe, program::DEFAULT_MAX_NUM_PUBLIC_VALUES}; use openvm_native_recursion::halo2::utils::{CacheHalo2ParamsReader, DEFAULT_PARAMS_DIR}; use openvm_rv32im_circuit::Rv32ImConfig; @@ -10,9 +10,11 @@ use openvm_rv32im_transpiler::{ Rv32ITranspilerExtension, 
Rv32IoTranspilerExtension, Rv32MTranspilerExtension, }; use openvm_sdk::{ - commit::commit_app_exe, prover::ContinuationProver, DefaultStaticVerifierPvHandler, Sdk, StdIn, + commit::commit_app_exe, prover::EvmHalo2Prover, DefaultStaticVerifierPvHandler, Sdk, StdIn, +}; +use openvm_stark_sdk::{ + bench::run_with_metric_collection, config::baby_bear_poseidon2::BabyBearPoseidon2Engine, }; -use openvm_stark_sdk::bench::run_with_metric_collection; use openvm_transpiler::{transpiler::Transpiler, FromElf}; const NUM_PUBLIC_VALUES: usize = DEFAULT_MAX_NUM_PUBLIC_VALUES; @@ -56,8 +58,13 @@ async fn main() -> Result<()> { let mut stdin = StdIn::default(); stdin.write(&n); run_with_metric_collection("OUTPUT_PATH", || { - let mut e2e_prover = - ContinuationProver::new(&halo2_params_reader, app_pk, app_committed_exe, full_agg_pk); + let mut e2e_prover = EvmHalo2Prover::<_, BabyBearPoseidon2Engine>::new( + &halo2_params_reader, + app_pk, + app_committed_exe, + full_agg_pk, + args.agg_tree_config, + ); e2e_prover.set_program_name("fib_e2e"); let _proof = e2e_prover.generate_proof_for_evm(stdin); }); diff --git a/benchmarks/src/bin/fibonacci.rs b/benchmarks/prove/src/bin/fibonacci.rs similarity index 95% rename from benchmarks/src/bin/fibonacci.rs rename to benchmarks/prove/src/bin/fibonacci.rs index 37ce6ff040..3e2875fc35 100644 --- a/benchmarks/src/bin/fibonacci.rs +++ b/benchmarks/prove/src/bin/fibonacci.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_rv32im_circuit::Rv32ImConfig; use openvm_rv32im_transpiler::{ diff --git a/benchmarks/src/bin/kitchen_sink.rs b/benchmarks/prove/src/bin/kitchen_sink.rs similarity index 86% rename from benchmarks/src/bin/kitchen_sink.rs rename to benchmarks/prove/src/bin/kitchen_sink.rs index c5d9ca7ab2..c4b7791330 100644 --- a/benchmarks/src/bin/kitchen_sink.rs +++ 
b/benchmarks/prove/src/bin/kitchen_sink.rs @@ -4,16 +4,18 @@ use clap::Parser; use eyre::Result; use num_bigint::BigUint; use openvm_algebra_circuit::{Fp2Extension, ModularExtension}; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::{instructions::exe::VmExe, SystemConfig}; use openvm_ecc_circuit::{WeierstrassExtension, P256_CONFIG, SECP256K1_CONFIG}; use openvm_native_recursion::halo2::utils::{CacheHalo2ParamsReader, DEFAULT_PARAMS_DIR}; use openvm_pairing_circuit::{PairingCurve, PairingExtension}; use openvm_sdk::{ - commit::commit_app_exe, config::SdkVmConfig, prover::ContinuationProver, + commit::commit_app_exe, config::SdkVmConfig, prover::EvmHalo2Prover, DefaultStaticVerifierPvHandler, Sdk, StdIn, }; -use openvm_stark_sdk::bench::run_with_metric_collection; +use openvm_stark_sdk::{ + bench::run_with_metric_collection, config::baby_bear_poseidon2::BabyBearPoseidon2Engine, +}; use openvm_transpiler::FromElf; fn main() -> Result<()> { @@ -78,8 +80,13 @@ fn main() -> Result<()> { )?; run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - let mut prover = - ContinuationProver::new(&halo2_params_reader, app_pk, app_committed_exe, full_agg_pk); + let mut prover = EvmHalo2Prover::<_, BabyBearPoseidon2Engine>::new( + &halo2_params_reader, + app_pk, + app_committed_exe, + full_agg_pk, + args.agg_tree_config, + ); prover.set_program_name("kitchen_sink"); let stdin = StdIn::default(); let _proof = prover.generate_proof_for_evm(stdin); diff --git a/benchmarks/src/bin/pairing.rs b/benchmarks/prove/src/bin/pairing.rs similarity index 96% rename from benchmarks/src/bin/pairing.rs rename to benchmarks/prove/src/bin/pairing.rs index 012285aa6c..6f200172a4 100644 --- a/benchmarks/src/bin/pairing.rs +++ b/benchmarks/prove/src/bin/pairing.rs @@ -1,7 +1,7 @@ use clap::Parser; use eyre::Result; use openvm_algebra_circuit::{Fp2Extension, ModularExtension}; -use openvm_benchmarks::utils::BenchmarkCli; 
+use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::SystemConfig; use openvm_ecc_circuit::WeierstrassExtension; use openvm_pairing_circuit::{PairingCurve, PairingExtension}; diff --git a/benchmarks/src/bin/regex.rs b/benchmarks/prove/src/bin/regex.rs similarity index 91% rename from benchmarks/src/bin/regex.rs rename to benchmarks/prove/src/bin/regex.rs index ec9835d92f..6efe8dd5b3 100644 --- a/benchmarks/src/bin/regex.rs +++ b/benchmarks/prove/src/bin/regex.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_keccak256_circuit::Keccak256Rv32Config; use openvm_keccak256_transpiler::Keccak256TranspilerExtension; @@ -24,7 +24,7 @@ fn main() -> Result<()> { .with_extension(Keccak256TranspilerExtension), )?; run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - let data = include_str!("../../programs/regex/regex_email.txt"); + let data = include_str!("../../../guest/regex/regex_email.txt"); let fe_bytes = data.to_owned().into_bytes(); args.bench_from_exe( diff --git a/benchmarks/src/bin/revm_transfer.rs b/benchmarks/prove/src/bin/revm_transfer.rs similarity index 95% rename from benchmarks/src/bin/revm_transfer.rs rename to benchmarks/prove/src/bin/revm_transfer.rs index 6c3395b202..03027aae83 100644 --- a/benchmarks/src/bin/revm_transfer.rs +++ b/benchmarks/prove/src/bin/revm_transfer.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_keccak256_circuit::Keccak256Rv32Config; use openvm_keccak256_transpiler::Keccak256TranspilerExtension; diff --git a/benchmarks/src/bin/rkyv.rs b/benchmarks/prove/src/bin/rkyv.rs similarity index 89% rename from benchmarks/src/bin/rkyv.rs rename to benchmarks/prove/src/bin/rkyv.rs index 
e6a07d33b3..8a43bd5679 100644 --- a/benchmarks/src/bin/rkyv.rs +++ b/benchmarks/prove/src/bin/rkyv.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::exe::VmExe; use openvm_rv32im_circuit::Rv32ImConfig; use openvm_rv32im_transpiler::{ @@ -23,7 +23,7 @@ fn main() -> Result<()> { )?; run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - let file_data = include_bytes!("../../programs/rkyv/minecraft_savedata.bin"); + let file_data = include_bytes!("../../../guest/rkyv/minecraft_savedata.bin"); let stdin = StdIn::from_bytes(file_data); args.bench_from_exe("rkyv", Rv32ImConfig::default(), exe, stdin) }) diff --git a/benchmarks/src/bin/verify_fibair.rs b/benchmarks/prove/src/bin/verify_fibair.rs similarity index 91% rename from benchmarks/src/bin/verify_fibair.rs rename to benchmarks/prove/src/bin/verify_fibair.rs index 95f7c6abbe..d2b75f7e71 100644 --- a/benchmarks/src/bin/verify_fibair.rs +++ b/benchmarks/prove/src/bin/verify_fibair.rs @@ -1,6 +1,6 @@ use clap::Parser; use eyre::Result; -use openvm_benchmarks::utils::BenchmarkCli; +use openvm_benchmarks_prove::util::BenchmarkCli; use openvm_circuit::arch::instructions::program::DEFAULT_MAX_NUM_PUBLIC_VALUES; use openvm_native_circuit::NativeConfig; use openvm_native_compiler::conversion::CompilerOptions; @@ -35,7 +35,8 @@ fn main() -> Result<()> { ); run_with_metric_collection("OUTPUT_PATH", || -> Result<()> { - // run_test tries to setup tracing, but it will be ignored since run_with_metric_collection already sets it. + // run_test tries to setup tracing, but it will be ignored since run_with_metric_collection + // already sets it. let (fib_air, fib_input) = collect_airs_and_inputs!(fib_chip); let vdata = engine.run_test(fib_air, fib_input).unwrap(); // Unlike other apps, this "app" does not have continuations enabled. 
@@ -59,8 +60,8 @@ fn main() -> Result<()> { let app_pk = sdk.app_keygen(app_config)?; let app_vk = app_pk.get_app_vk(); let committed_exe = sdk.commit_app_exe(app_fri_params, program.into())?; - let prover = - AppProver::new(app_pk.app_vm_pk, committed_exe).with_program_name("verify_fibair"); + let prover = AppProver::<_, BabyBearPoseidon2Engine>::new(app_pk.app_vm_pk, committed_exe) + .with_program_name("verify_fibair"); let proof = prover.generate_app_proof_without_continuations(input_stream.into()); sdk.verify_app_proof_without_continuations(&app_vk, &proof)?; Ok(()) diff --git a/benchmarks/prove/src/lib.rs b/benchmarks/prove/src/lib.rs new file mode 100644 index 0000000000..812d1edf2f --- /dev/null +++ b/benchmarks/prove/src/lib.rs @@ -0,0 +1 @@ +pub mod util; diff --git a/benchmarks/src/utils.rs b/benchmarks/prove/src/util.rs similarity index 80% rename from benchmarks/src/utils.rs rename to benchmarks/prove/src/util.rs index 1a04804fd0..ae742cd591 100644 --- a/benchmarks/src/utils.rs +++ b/benchmarks/prove/src/util.rs @@ -1,21 +1,20 @@ -use std::{fs::read, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use clap::{command, Parser}; use eyre::Result; -use openvm_build::{build_guest_package, get_package, guest_methods, GuestOptions}; +use openvm_benchmarks_utils::{build_elf, get_programs_dir}; use openvm_circuit::arch::{instructions::exe::VmExe, DefaultSegmentationStrategy, VmConfig}; use openvm_native_circuit::NativeConfig; use openvm_native_compiler::conversion::CompilerOptions; use openvm_sdk::{ commit::commit_app_exe, config::{ - AggConfig, AggStarkConfig, AppConfig, Halo2Config, DEFAULT_APP_LOG_BLOWUP, - DEFAULT_INTERNAL_LOG_BLOWUP, DEFAULT_LEAF_LOG_BLOWUP, DEFAULT_ROOT_LOG_BLOWUP, + AggConfig, AggStarkConfig, AggregationTreeConfig, AppConfig, Halo2Config, + DEFAULT_APP_LOG_BLOWUP, DEFAULT_INTERNAL_LOG_BLOWUP, DEFAULT_LEAF_LOG_BLOWUP, + DEFAULT_ROOT_LOG_BLOWUP, }, keygen::{leaf_keygen, AppProvingKey}, - prover::{ - 
vm::local::VmLocalProver, AppProver, LeafProvingController, DEFAULT_NUM_CHILDREN_LEAF, - }, + prover::{vm::local::VmLocalProver, AppProver, LeafProvingController}, Sdk, StdIn, }; use openvm_stark_backend::utils::metrics_span; @@ -24,11 +23,11 @@ use openvm_stark_sdk::{ baby_bear_poseidon2::{BabyBearPoseidon2Config, BabyBearPoseidon2Engine}, FriParameters, }, + engine::StarkFriEngine, openvm_stark_backend::Chip, p3_baby_bear::BabyBear, }; -use openvm_transpiler::{elf::Elf, openvm_platform::memory::MEM_SIZE}; -use tempfile::tempdir; +use openvm_transpiler::elf::Elf; use tracing::info_span; type F = BabyBear; @@ -66,6 +65,10 @@ pub struct BenchmarkCli { #[arg(short, long, alias = "max_segment_length")] pub max_segment_length: Option, + /// Controls the arity (num_children) of the aggregation tree + #[command(flatten)] + pub agg_tree_config: AggregationTreeConfig, + /// Whether to execute with additional profiling metric collection #[arg(long)] pub profiling: bool, @@ -141,7 +144,7 @@ impl BenchmarkCli { } .to_string(); let manifest_dir = get_programs_dir().join(program_name); - build_bench(manifest_dir, profile) + build_elf(&manifest_dir, profile) } pub fn bench_from_exe( @@ -157,7 +160,7 @@ impl BenchmarkCli { VC::Periphery: Chip, { let app_config = self.app_config(vm_config); - bench_from_exe( + bench_from_exe::( bench_name, app_config, exe, @@ -170,30 +173,6 @@ impl BenchmarkCli { } } -pub fn get_programs_dir() -> PathBuf { - let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).to_path_buf(); - dir.push("programs"); - dir -} - -pub fn build_bench(manifest_dir: PathBuf, profile: impl ToString) -> Result { - let pkg = get_package(manifest_dir); - let target_dir = tempdir()?; - // Build guest with default features - let guest_opts = GuestOptions::default() - .with_target_dir(target_dir.path()) - .with_profile(profile.to_string()); - if let Err(Some(code)) = build_guest_package(&pkg, &guest_opts, None, &None) { - std::process::exit(code); - } - // Assumes the 
package has a single target binary - let elf_path = guest_methods(&pkg, &target_dir, &guest_opts.features, &guest_opts.profile) - .pop() - .unwrap(); - let data = read(elf_path)?; - Elf::decode(&data, MEM_SIZE as u32) -} - /// 1. Generate proving key from config. /// 2. Commit to the exe by generating cached trace for program. /// 3. Executes runtime @@ -202,7 +181,7 @@ pub fn build_bench(manifest_dir: PathBuf, profile: impl ToString) -> Result /// 6. Verify STARK proofs. /// /// Returns the data necessary for proof aggregation. -pub fn bench_from_exe( +pub fn bench_from_exe>( bench_name: impl ToString, app_config: AppConfig, exe: impl Into>, @@ -229,9 +208,11 @@ where }); // 3. Executes runtime // 4. Generate trace - // 5. Generate STARK proofs for each segment (segmentation is determined by `config`), with timer. + // 5. Generate STARK proofs for each segment (segmentation is determined by `config`), with + // timer. let app_vk = app_pk.get_app_vk(); - let prover = AppProver::new(app_pk.app_vm_pk, committed_exe).with_program_name(bench_name); + let prover = + AppProver::::new(app_pk.app_vm_pk, committed_exe).with_program_name(bench_name); let app_proof = prover.generate_app_proof(input_stream); // 6. Verify STARK proofs, including boundary conditions. 
let sdk = Sdk::new(); @@ -239,12 +220,10 @@ where .expect("Verification failed"); if let Some(leaf_vm_config) = leaf_vm_config { let leaf_vm_pk = leaf_keygen(app_config.leaf_fri_params.fri_params, leaf_vm_config); - let leaf_prover = VmLocalProver::::new( - leaf_vm_pk, - app_pk.leaf_committed_exe, - ); + let leaf_prover = + VmLocalProver::::new(leaf_vm_pk, app_pk.leaf_committed_exe); let leaf_controller = LeafProvingController { - num_children: DEFAULT_NUM_CHILDREN_LEAF, + num_children: AggregationTreeConfig::default().num_children_leaf, }; leaf_controller.generate_proof(&leaf_prover, &app_proof); } diff --git a/benchmarks/src/lib.rs b/benchmarks/src/lib.rs deleted file mode 100644 index b5614dd823..0000000000 --- a/benchmarks/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod utils; diff --git a/benchmarks/utils/Cargo.toml b/benchmarks/utils/Cargo.toml new file mode 100644 index 0000000000..1b1d600a82 --- /dev/null +++ b/benchmarks/utils/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "openvm-benchmarks-utils" +version.workspace = true +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +openvm-build.workspace = true +openvm-transpiler.workspace = true + +cargo_metadata.workspace = true +clap = { version = "4.5.9", features = ["derive", "env"] } +eyre.workspace = true +tempfile.workspace = true +tracing.workspace = true +tracing-subscriber = { version = "0.3.17", features = ["std", "env-filter"] } + +[dev-dependencies] + +[features] +default = [] +build-binaries = [] + +[[bin]] +name = "build-elfs" +path = "src/build-elfs.rs" +required-features = ["build-binaries"] diff --git a/benchmarks/utils/src/build-elfs.rs b/benchmarks/utils/src/build-elfs.rs new file mode 100644 index 0000000000..3bed7cf6fd --- /dev/null +++ b/benchmarks/utils/src/build-elfs.rs @@ -0,0 +1,101 @@ +use std::fs; + +use clap::{arg, Parser}; +use eyre::Result; +use 
openvm_benchmarks_utils::{build_elf_with_path, get_elf_path_with_pkg, get_programs_dir}; +use openvm_build::get_package; +use tracing_subscriber::{fmt, EnvFilter}; + +#[derive(Parser)] +#[command(author, version, about = "Build OpenVM benchmark programs")] +struct Cli { + /// Force rebuild even if the output ELF already exists + #[arg(short, long)] + force: bool, + + /// Specific program directories to build (builds all if not specified) + #[arg(value_name = "PROGRAM")] + programs: Vec, + + /// Programs to skip + #[arg(long, value_name = "PROGRAM")] + skip: Vec, + + /// Build profile (debug or release) + #[arg(short, long, default_value = "release")] + profile: String, + + /// Enable verbose output + #[arg(short, long)] + verbose: bool, +} + +fn main() -> Result<()> { + let cli = Cli::parse(); + + // Set up logging + let filter = if cli.verbose { + EnvFilter::from_default_env() + } else { + EnvFilter::new("info") + }; + fmt::fmt().with_env_filter(filter).init(); + + let programs_dir = get_programs_dir(); + tracing::info!("Building programs from {}", programs_dir.display()); + + // Collect all available program directories + let available_programs = fs::read_dir(&programs_dir)? 
+ .filter_map(|entry| { + let entry = entry.ok()?; + let path = entry.path(); + + if path.is_dir() { + let dir_name = path.file_name()?.to_str()?.to_string(); + Some((dir_name, path)) + } else { + None + } + }) + .collect::>(); + + // Filter programs if specific ones were requested + let programs_to_build = if cli.programs.is_empty() { + available_programs + } else { + available_programs + .into_iter() + .filter(|(name, _)| cli.programs.contains(name)) + .collect() + }; + + // Filter out skipped programs + let programs_to_build = programs_to_build + .into_iter() + .filter(|(name, _)| !cli.skip.contains(name)) + .collect::>(); + + if programs_to_build.is_empty() { + tracing::warn!("No matching programs found to build"); + return Ok(()); + } + + // Build each selected program + for (dir_name, path) in programs_to_build { + let pkg = get_package(&path); + let elf_path = get_elf_path_with_pkg(&path, &pkg); + + if cli.force || !elf_path.exists() { + tracing::info!("Building: {}", dir_name); + build_elf_with_path(&pkg, &cli.profile, Some(&elf_path))?; + } else { + tracing::info!( + "Skipping existing build: {} (use --force to rebuild)", + dir_name + ); + } + } + + tracing::info!("Build complete"); + Ok(()) +} diff --git a/benchmarks/utils/src/lib.rs b/benchmarks/utils/src/lib.rs new file mode 100644 index 0000000000..99e5ce917b --- /dev/null +++ b/benchmarks/utils/src/lib.rs @@ -0,0 +1,73 @@ +use std::{ + fs::read, + path::{Path, PathBuf}, +}; + +use cargo_metadata::Package; +use eyre::Result; +use openvm_build::{build_guest_package, get_package, guest_methods, GuestOptions}; +use openvm_transpiler::{elf::Elf, openvm_platform::memory::MEM_SIZE}; +use tempfile::tempdir; + +pub fn get_programs_dir() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../guest") +} + +pub fn build_elf(manifest_dir: &PathBuf, profile: impl ToString) -> Result { + let pkg = get_package(manifest_dir); + build_elf_with_path(&pkg, profile, None) +} + +pub fn build_elf_with_path( + 
pkg: &Package, + profile: impl ToString, + elf_path: Option<&PathBuf>, +) -> Result { + // Use a temporary directory for the build + let temp_dir = tempdir()?; + let target_dir = temp_dir.path(); + + // Build guest with default features + let guest_opts = GuestOptions::default() + .with_target_dir(target_dir) + .with_profile(profile.to_string()); + + if let Err(Some(code)) = build_guest_package(pkg, &guest_opts, None, &None) { + std::process::exit(code); + } + + // Assumes the package has a single target binary + let temp_elf_path = guest_methods(pkg, target_dir, &guest_opts.features, &guest_opts.profile) + .pop() + .unwrap(); + + // If an elf_path is provided, copy the built ELF to that location + if let Some(dest_path) = elf_path { + // Create parent directories if they don't exist + if let Some(parent) = dest_path.parent() { + if !parent.exists() { + std::fs::create_dir_all(parent)?; + } + } + + // Copy the built ELF to the destination + std::fs::copy(&temp_elf_path, dest_path)?; + } + + read_elf_file(&temp_elf_path) +} + +pub fn get_elf_path(manifest_dir: &PathBuf) -> PathBuf { + let pkg = get_package(manifest_dir); + get_elf_path_with_pkg(manifest_dir, &pkg) +} + +pub fn get_elf_path_with_pkg(manifest_dir: &Path, pkg: &Package) -> PathBuf { + let elf_file_name = format!("{}.elf", &pkg.name); + manifest_dir.join("elf").join(elf_file_name) +} + +pub fn read_elf_file(elf_path: &PathBuf) -> Result { + let data = read(elf_path)?; + Elf::decode(&data, MEM_SIZE as u32) +} diff --git a/book/.markdownlint.json b/book/.markdownlint.json index 2489b3699d..385ff1b06f 100644 --- a/book/.markdownlint.json +++ b/book/.markdownlint.json @@ -14,5 +14,7 @@ "single-title": false, "no-emphasis-as-heading": false, "no-duplicate-header": false, - "no-inline-html": false -} + "no-inline-html": false, + "MD013": false, + "MD029": false + } diff --git a/book/Justfile b/book/Justfile index 8904e6c7c4..313c8c929d 100644 --- a/book/Justfile +++ b/book/Justfile @@ -11,10 +11,10 @@ deps: 
pnpm i --frozen-lockfile # Lint the workspace for all available targets -lint: lint-book-md-check lint-book-toc-check lint-filenames lint-book-spelling +lint: lint-book-md-check lint-filenames lint-book-spelling # Updates all files to fix linting issues -lint-fix: lint-book-md-fix lint-book-toc +lint-fix: lint-book-md-fix # Validates markdown file formatting lint-book-md-check: @@ -24,14 +24,6 @@ lint-book-md-check: lint-book-md-fix: npx markdownlint-cli2 --fix "./src/**/*.md" -# Validates Table of Content Sections with doctoc -lint-book-toc-check: - npx doctoc '--title=**Table of Contents**' ./src && git diff --exit-code ./src - -# Updates Table of Content Sections with doctoc -lint-book-toc: - npx doctoc '--title=**Table of Contents**' ./src - # Validates spelling using cspell lint-book-spelling: npx cspell "./**/*.md" diff --git a/book/package.json b/book/package.json index 052df9876d..5dd7388530 100644 --- a/book/package.json +++ b/book/package.json @@ -1,14 +1,13 @@ { - "name": "specs", - "version": "1.0.0", - "private": true, - "engines": { - "node": ">=16", - "pnpm": ">=9" - }, - "dependencies": { - "doctoc": "^2.2.1", - "markdownlint-cli2": "0.4.0", - "cspell": "^8.1.3" - } + "name": "specs", + "version": "1.0.0", + "private": true, + "engines": { + "node": ">=16", + "pnpm": ">=9" + }, + "dependencies": { + "cspell": "^8.18.0", + "markdownlint-cli2": "0.17.2" + } } diff --git a/book/pnpm-lock.yaml b/book/pnpm-lock.yaml index 11bc452c5c..43baeacfc0 100644 --- a/book/pnpm-lock.yaml +++ b/book/pnpm-lock.yaml @@ -9,228 +9,231 @@ importers: .: dependencies: cspell: - specifier: ^8.1.3 - version: 8.16.0 - doctoc: - specifier: ^2.2.1 - version: 2.2.1 + specifier: ^8.18.0 + version: 8.18.0 markdownlint-cli2: - specifier: 0.4.0 - version: 0.4.0 + specifier: 0.17.2 + version: 0.17.2 packages: - '@cspell/cspell-bundled-dicts@8.16.0': - resolution: {integrity: sha512-R0Eqq5kTZnmZ0elih5uY3TWjMqqAeMl7ciU7maUs+m1FNjCEdJXtJ9wrQxNgjmXi0tX8cvahZRO3O558tEz/KA==} + 
'@cspell/cspell-bundled-dicts@8.18.0': + resolution: {integrity: sha512-c7OkDgtYYg0rvj49YS/QzjKeGg/l/d+DjMDqP8BProhKDhAghCsvc6l3SHCGnvyw42+YSTVdL5uLBIaA1OKBlQ==} engines: {node: '>=18'} - '@cspell/cspell-json-reporter@8.16.0': - resolution: {integrity: sha512-KLjPK94gA3JNuWy70LeenJ6EL3SFk2ejERKYJ6SVV/cVOKIvVd2qe42yX3/A/DkF2xzuZ2LD4z0sfoqQL1BaqA==} + '@cspell/cspell-json-reporter@8.18.0': + resolution: {integrity: sha512-glUYtRJ/xIgvCvFqgcF04RJiTFhL95wl1uirP+Qa+oqxvg/zP9zxsZupLD3aeMFhyDpgYwtBiebUmhSqrLDyaw==} engines: {node: '>=18'} - '@cspell/cspell-pipe@8.16.0': - resolution: {integrity: sha512-WoCgrv/mrtwCY4lhc6vEcqN3AQ7lT6K0NW5ShoSo116U2tRaW0unApIYH4Va8u7T9g3wyspFEceQRR1xD9qb9w==} + '@cspell/cspell-pipe@8.18.0': + resolution: {integrity: sha512-bSFncsV16B9nhHJdSMTCzdC0Su/TKs+JGTuCq2UiNi6vWxvsL/S1ueiRb+HT0WYKROwtAeHEKnjlw0G3OBBuFw==} engines: {node: '>=18'} - '@cspell/cspell-resolver@8.16.0': - resolution: {integrity: sha512-b+99bph43ptkXlQHgPXSkN/jK6LQHy2zL1Fm9up7+x6Yr64bxAzWzoeqJAPtnrPvFuOrFN0jZasZzKBw8CvrrQ==} + '@cspell/cspell-resolver@8.18.0': + resolution: {integrity: sha512-kNQJtYYJg6WpLoDUZW8VodovOtKLiDrb+GXmcee01qQmrEPCuub9gwoiRuka0sHI2logz0S8l9eAittClpxReg==} engines: {node: '>=18'} - '@cspell/cspell-service-bus@8.16.0': - resolution: {integrity: sha512-+fn763JKA4EYCOv+1VShFq015UMEBAFRDr+rlCnesgLE0fv9TSFVLsjOfh9/g6GuGQLCRLUqKztwwuueeErstQ==} + '@cspell/cspell-service-bus@8.18.0': + resolution: {integrity: sha512-yGnb59tUadd1q9dSIvg+Q8yZc7N2ZloZ8Sc5lAfxKOJWUh91ugu2UizmL4lm82vDrOevL3kryiauVTsjoS+UOg==} engines: {node: '>=18'} - '@cspell/cspell-types@8.16.0': - resolution: {integrity: sha512-bGrIK7p4NVsK+QX/CYWmjax+FkzfSIZaIaoiBESGV5gmwgXDVRMJ3IP6tQVAmTtckOYHCmtT5CZgI8zXWr8dHQ==} + '@cspell/cspell-types@8.18.0': + resolution: {integrity: sha512-z7ETwulTCAHpSNBqwD5d3Uoui4ClD+tfREoD0cKd9uMbLtN9W/WMVQAGDWJpOCUFrWsGYYM3/3/ob2oZhwdQag==} engines: {node: '>=18'} - '@cspell/dict-ada@4.0.5': - resolution: {integrity: 
sha512-6/RtZ/a+lhFVmrx/B7bfP7rzC4yjEYe8o74EybXcvu4Oue6J4Ey2WSYj96iuodloj1LWrkNCQyX5h4Pmcj0Iag==} + '@cspell/dict-ada@4.1.0': + resolution: {integrity: sha512-7SvmhmX170gyPd+uHXrfmqJBY5qLcCX8kTGURPVeGxmt8XNXT75uu9rnZO+jwrfuU2EimNoArdVy5GZRGljGNg==} - '@cspell/dict-al@1.0.3': - resolution: {integrity: sha512-V1HClwlfU/qwSq2Kt+MkqRAsonNu3mxjSCDyGRecdLGIHmh7yeEeaxqRiO/VZ4KP+eVSiSIlbwrb5YNFfxYZbw==} + '@cspell/dict-al@1.1.0': + resolution: {integrity: sha512-PtNI1KLmYkELYltbzuoztBxfi11jcE9HXBHCpID2lou/J4VMYKJPNqe4ZjVzSI9NYbMnMnyG3gkbhIdx66VSXg==} - '@cspell/dict-aws@4.0.7': - resolution: {integrity: sha512-PoaPpa2NXtSkhGIMIKhsJUXB6UbtTt6Ao3x9JdU9kn7fRZkwD4RjHDGqulucIOz7KeEX/dNRafap6oK9xHe4RA==} + '@cspell/dict-aws@4.0.9': + resolution: {integrity: sha512-bDYdnnJGwSkIZ4gzrauu7qzOs/ZAY/FnU4k11LgdMI8BhwMfsbsy2EI1iS+sD/BI5ZnNT9kU5YR3WADeNOmhRg==} - '@cspell/dict-bash@4.1.8': - resolution: {integrity: sha512-I2CM2pTNthQwW069lKcrVxchJGMVQBzru2ygsHCwgidXRnJL/NTjAPOFTxN58Jc1bf7THWghfEDyKX/oyfc0yg==} + '@cspell/dict-bash@4.2.0': + resolution: {integrity: sha512-HOyOS+4AbCArZHs/wMxX/apRkjxg6NDWdt0jF9i9XkvJQUltMwEhyA2TWYjQ0kssBsnof+9amax2lhiZnh3kCg==} - '@cspell/dict-companies@3.1.7': - resolution: {integrity: sha512-ncVs/efuAkP1/tLDhWbXukBjgZ5xOUfe03neHMWsE8zvXXc5+Lw6TX5jaJXZLOoES/f4j4AhRE20jsPCF5pm+A==} + '@cspell/dict-companies@3.1.14': + resolution: {integrity: sha512-iqo1Ce4L7h0l0GFSicm2wCLtfuymwkvgFGhmu9UHyuIcTbdFkDErH+m6lH3Ed+QuskJlpQ9dM7puMIGqUlVERw==} - '@cspell/dict-cpp@6.0.2': - resolution: {integrity: sha512-yw5eejWvY4bAnc6LUA44m4WsFwlmgPt2uMSnO7QViGMBDuoeopMma4z9XYvs4lSjTi8fIJs/A1YDfM9AVzb8eg==} + '@cspell/dict-cpp@6.0.6': + resolution: {integrity: sha512-HMV1chsExuZt5IL9rYBW7GmhNZDVdQJEd1WtFgOO6jqiNxbpTG3Is3Pkldl7FpusBQQZr4BdjMit5bnPpVRy3A==} - '@cspell/dict-cryptocurrencies@5.0.3': - resolution: {integrity: sha512-bl5q+Mk+T3xOZ12+FG37dB30GDxStza49Rmoax95n37MTLksk9wBo1ICOlPJ6PnDUSyeuv4SIVKgRKMKkJJglA==} + '@cspell/dict-cryptocurrencies@5.0.4': + resolution: 
{integrity: sha512-6iFu7Abu+4Mgqq08YhTKHfH59mpMpGTwdzDB2Y8bbgiwnGFCeoiSkVkgLn1Kel2++hYcZ8vsAW/MJS9oXxuMag==} - '@cspell/dict-csharp@4.0.5': - resolution: {integrity: sha512-c/sFnNgtRwRJxtC3JHKkyOm+U3/sUrltFeNwml9VsxKBHVmvlg4tk4ar58PdpW9/zTlGUkWi2i85//DN1EsUCA==} + '@cspell/dict-csharp@4.0.6': + resolution: {integrity: sha512-w/+YsqOknjQXmIlWDRmkW+BHBPJZ/XDrfJhZRQnp0wzpPOGml7W0q1iae65P2AFRtTdPKYmvSz7AL5ZRkCnSIw==} - '@cspell/dict-css@4.0.16': - resolution: {integrity: sha512-70qu7L9z/JR6QLyJPk38fNTKitlIHnfunx0wjpWQUQ8/jGADIhMCrz6hInBjqPNdtGpYm8d1dNFyF8taEkOgrQ==} + '@cspell/dict-css@4.0.17': + resolution: {integrity: sha512-2EisRLHk6X/PdicybwlajLGKF5aJf4xnX2uuG5lexuYKt05xV/J/OiBADmi8q9obhxf1nesrMQbqAt+6CsHo/w==} - '@cspell/dict-dart@2.2.4': - resolution: {integrity: sha512-of/cVuUIZZK/+iqefGln8G3bVpfyN6ZtH+LyLkHMoR5tEj+2vtilGNk9ngwyR8L4lEqbKuzSkOxgfVjsXf5PsQ==} + '@cspell/dict-dart@2.3.0': + resolution: {integrity: sha512-1aY90lAicek8vYczGPDKr70pQSTQHwMFLbmWKTAI6iavmb1fisJBS1oTmMOKE4ximDf86MvVN6Ucwx3u/8HqLg==} - '@cspell/dict-data-science@2.0.5': - resolution: {integrity: sha512-nNSILXmhSJox9/QoXICPQgm8q5PbiSQP4afpbkBqPi/u/b3K9MbNH5HvOOa6230gxcGdbZ9Argl2hY/U8siBlg==} + '@cspell/dict-data-science@2.0.7': + resolution: {integrity: sha512-XhAkK+nSW6zmrnWzusmZ1BpYLc62AWYHZc2p17u4nE2Z9XG5DleG55PCZxXQTKz90pmwlhFM9AfpkJsYaBWATA==} - '@cspell/dict-django@4.1.3': - resolution: {integrity: sha512-yBspeL3roJlO0a1vKKNaWABURuHdHZ9b1L8d3AukX0AsBy9snSggc8xCavPmSzNfeMDXbH+1lgQiYBd3IW03fg==} + '@cspell/dict-django@4.1.4': + resolution: {integrity: sha512-fX38eUoPvytZ/2GA+g4bbdUtCMGNFSLbdJJPKX2vbewIQGfgSFJKY56vvcHJKAvw7FopjvgyS/98Ta9WN1gckg==} - '@cspell/dict-docker@1.1.11': - resolution: {integrity: sha512-s0Yhb16/R+UT1y727ekbR/itWQF3Qz275DR1ahOa66wYtPjHUXmhM3B/LT3aPaX+hD6AWmK23v57SuyfYHUjsw==} + '@cspell/dict-docker@1.1.12': + resolution: {integrity: sha512-6d25ZPBnYZaT9D9An/x6g/4mk542R8bR3ipnby3QFCxnfdd6xaWiTcwDPsCgwN2aQZIQ1jX/fil9KmBEqIK/qA==} - '@cspell/dict-dotnet@5.0.8': - 
resolution: {integrity: sha512-MD8CmMgMEdJAIPl2Py3iqrx3B708MbCIXAuOeZ0Mzzb8YmLmiisY7QEYSZPg08D7xuwARycP0Ki+bb0GAkFSqg==} + '@cspell/dict-dotnet@5.0.9': + resolution: {integrity: sha512-JGD6RJW5sHtO5lfiJl11a5DpPN6eKSz5M1YBa1I76j4dDOIqgZB6rQexlDlK1DH9B06X4GdDQwdBfnpAB0r2uQ==} - '@cspell/dict-elixir@4.0.6': - resolution: {integrity: sha512-TfqSTxMHZ2jhiqnXlVKM0bUADtCvwKQv2XZL/DI0rx3doG8mEMS8SGPOmiyyGkHpR/pGOq18AFH3BEm4lViHIw==} + '@cspell/dict-elixir@4.0.7': + resolution: {integrity: sha512-MAUqlMw73mgtSdxvbAvyRlvc3bYnrDqXQrx5K9SwW8F7fRYf9V4vWYFULh+UWwwkqkhX9w03ZqFYRTdkFku6uA==} - '@cspell/dict-en-common-misspellings@2.0.7': - resolution: {integrity: sha512-qNFo3G4wyabcwnM+hDrMYKN9vNVg/k9QkhqSlSst6pULjdvPyPs1mqz1689xO/v9t8e6sR4IKc3CgUXDMTYOpA==} + '@cspell/dict-en-common-misspellings@2.0.10': + resolution: {integrity: sha512-80mXJLtr0tVEtzowrI7ycVae/ULAYImZUlr0kUTpa8i57AUk7Zy3pYBs44EYIKW7ZC9AHu4Qjjfq4vriAtyTDQ==} '@cspell/dict-en-gb@1.1.33': resolution: {integrity: sha512-tKSSUf9BJEV+GJQAYGw5e+ouhEe2ZXE620S7BLKe3ZmpnjlNG9JqlnaBhkIMxKnNFkLY2BP/EARzw31AZnOv4g==} - '@cspell/dict-en_us@4.3.28': - resolution: {integrity: sha512-BN1PME7cOl7DXRQJ92pEd1f0Xk5sqjcDfThDGkKcsgwbSOY7KnTc/czBW6Pr3WXIchIm6cT12KEfjNqx7U7Rrw==} + '@cspell/dict-en_us@4.3.35': + resolution: {integrity: sha512-HF6QNyPHkxeo/SosaZXRQlnKDUTjIzrGKyqfbw/fPPlPYrXefAZZ40ofheb5HnbUicR7xqV/lsc/HQfqYshGIw==} - '@cspell/dict-filetypes@3.0.8': - resolution: {integrity: sha512-D3N8sm/iptzfVwsib/jvpX+K/++rM8SRpLDFUaM4jxm8EyGmSIYRbKZvdIv5BkAWmMlTWoRqlLn7Yb1b11jKJg==} + '@cspell/dict-filetypes@3.0.11': + resolution: {integrity: sha512-bBtCHZLo7MiSRUqx5KEiPdGOmXIlDGY+L7SJEtRWZENpAKE+96rT7hj+TUUYWBbCzheqHr0OXZJFEKDgsG/uZg==} - '@cspell/dict-flutter@1.0.3': - resolution: {integrity: sha512-52C9aUEU22ptpgYh6gQyIdA4MP6NPwzbEqndfgPh3Sra191/kgs7CVqXiO1qbtZa9gnYHUoVApkoxRE7mrXHfg==} + '@cspell/dict-flutter@1.1.0': + resolution: {integrity: 
sha512-3zDeS7zc2p8tr9YH9tfbOEYfopKY/srNsAa+kE3rfBTtQERAZeOhe5yxrnTPoufctXLyuUtcGMUTpxr3dO0iaA==} - '@cspell/dict-fonts@4.0.3': - resolution: {integrity: sha512-sPd17kV5qgYXLteuHFPn5mbp/oCHKgitNfsZLFC3W2fWEgZlhg4hK+UGig3KzrYhhvQ8wBnmZrAQm0TFKCKzsA==} + '@cspell/dict-fonts@4.0.4': + resolution: {integrity: sha512-cHFho4hjojBcHl6qxidl9CvUb492IuSk7xIf2G2wJzcHwGaCFa2o3gRcxmIg1j62guetAeDDFELizDaJlVRIOg==} - '@cspell/dict-fsharp@1.0.4': - resolution: {integrity: sha512-G5wk0o1qyHUNi9nVgdE1h5wl5ylq7pcBjX8vhjHcO4XBq20D5eMoXjwqMo/+szKAqzJ+WV3BgAL50akLKrT9Rw==} + '@cspell/dict-fsharp@1.1.0': + resolution: {integrity: sha512-oguWmHhGzgbgbEIBKtgKPrFSVAFtvGHaQS0oj+vacZqMObwkapcTGu7iwf4V3Bc2T3caf0QE6f6rQfIJFIAVsw==} - '@cspell/dict-fullstack@3.2.3': - resolution: {integrity: sha512-62PbndIyQPH11mAv0PyiyT0vbwD0AXEocPpHlCHzfb5v9SspzCCbzQ/LIBiFmyRa+q5LMW35CnSVu6OXdT+LKg==} + '@cspell/dict-fullstack@3.2.6': + resolution: {integrity: sha512-cSaq9rz5RIU9j+0jcF2vnKPTQjxGXclntmoNp4XB7yFX2621PxJcekGjwf/lN5heJwVxGLL9toR0CBlGKwQBgA==} - '@cspell/dict-gaming-terms@1.0.8': - resolution: {integrity: sha512-7OL0zTl93WFWhhtpXFrtm9uZXItC3ncAs8d0iQDMMFVNU1rBr6raBNxJskxE5wx2Ant12fgI66ZGVagXfN+yfA==} + '@cspell/dict-gaming-terms@1.1.0': + resolution: {integrity: sha512-46AnDs9XkgJ2f1Sqol1WgfJ8gOqp60fojpc9Wxch7x+BA63g4JfMV5/M5x0sI0TLlLY8EBSglcr8wQF/7C80AQ==} - '@cspell/dict-git@3.0.3': - resolution: {integrity: sha512-LSxB+psZ0qoj83GkyjeEH/ZViyVsGEF/A6BAo8Nqc0w0HjD2qX/QR4sfA6JHUgQ3Yi/ccxdK7xNIo67L2ScW5A==} + '@cspell/dict-git@3.0.4': + resolution: {integrity: sha512-C44M+m56rYn6QCsLbiKiedyPTMZxlDdEYAsPwwlL5bhMDDzXZ3Ic8OCQIhMbiunhCOJJT+er4URmOmM+sllnjg==} - '@cspell/dict-golang@6.0.17': - resolution: {integrity: sha512-uDDLEJ/cHdLiqPw4+5BnmIo2i/TSR+uDvYd6JlBjTmjBKpOCyvUgYRztH7nv5e7virsN5WDiUWah4/ATQGz4Pw==} + '@cspell/dict-golang@6.0.19': + resolution: {integrity: sha512-VS+oinB2/CbgmHE06kMJlj52OVMZM0S2EEXph3oaroNTgTuclSwdFylQmOEjquZi55kW+n3FM9MyWXiitB7Dtg==} - '@cspell/dict-google@1.0.4': - 
resolution: {integrity: sha512-JThUT9eiguCja1mHHLwYESgxkhk17Gv7P3b1S7ZJzXw86QyVHPrbpVoMpozHk0C9o+Ym764B7gZGKmw9uMGduQ==} + '@cspell/dict-google@1.0.8': + resolution: {integrity: sha512-BnMHgcEeaLyloPmBs8phCqprI+4r2Jb8rni011A8hE+7FNk7FmLE3kiwxLFrcZnnb7eqM0agW4zUaNoB0P+z8A==} - '@cspell/dict-haskell@4.0.4': - resolution: {integrity: sha512-EwQsedEEnND/vY6tqRfg9y7tsnZdxNqOxLXSXTsFA6JRhUlr8Qs88iUUAfsUzWc4nNmmzQH2UbtT25ooG9x4nA==} + '@cspell/dict-haskell@4.0.5': + resolution: {integrity: sha512-s4BG/4tlj2pPM9Ha7IZYMhUujXDnI0Eq1+38UTTCpatYLbQqDwRFf2KNPLRqkroU+a44yTUAe0rkkKbwy4yRtQ==} '@cspell/dict-html-symbol-entities@4.0.3': resolution: {integrity: sha512-aABXX7dMLNFdSE8aY844X4+hvfK7977sOWgZXo4MTGAmOzR8524fjbJPswIBK7GaD3+SgFZ2yP2o0CFvXDGF+A==} - '@cspell/dict-html@4.0.10': - resolution: {integrity: sha512-I9uRAcdtHbh0wEtYZlgF0TTcgH0xaw1B54G2CW+tx4vHUwlde/+JBOfIzird4+WcMv4smZOfw+qHf7puFUbI5g==} + '@cspell/dict-html@4.0.11': + resolution: {integrity: sha512-QR3b/PB972SRQ2xICR1Nw/M44IJ6rjypwzA4jn+GH8ydjAX9acFNfc+hLZVyNe0FqsE90Gw3evLCOIF0vy1vQw==} - '@cspell/dict-java@5.0.10': - resolution: {integrity: sha512-pVNcOnmoGiNL8GSVq4WbX/Vs2FGS0Nej+1aEeGuUY9CU14X8yAVCG+oih5ZoLt1jaR8YfR8byUF8wdp4qG4XIw==} + '@cspell/dict-java@5.0.11': + resolution: {integrity: sha512-T4t/1JqeH33Raa/QK/eQe26FE17eUCtWu+JsYcTLkQTci2dk1DfcIKo8YVHvZXBnuM43ATns9Xs0s+AlqDeH7w==} - '@cspell/dict-julia@1.0.4': - resolution: {integrity: sha512-bFVgNX35MD3kZRbXbJVzdnN7OuEqmQXGpdOi9jzB40TSgBTlJWA4nxeAKV4CPCZxNRUGnLH0p05T/AD7Aom9/w==} + '@cspell/dict-julia@1.1.0': + resolution: {integrity: sha512-CPUiesiXwy3HRoBR3joUseTZ9giFPCydSKu2rkh6I2nVjXnl5vFHzOMLXpbF4HQ1tH2CNfnDbUndxD+I+7eL9w==} - '@cspell/dict-k8s@1.0.9': - resolution: {integrity: sha512-Q7GELSQIzo+BERl2ya/nBEnZeQC+zJP19SN1pI6gqDYraM51uYJacbbcWLYYO2Y+5joDjNt/sd/lJtLaQwoSlA==} + '@cspell/dict-k8s@1.0.10': + resolution: {integrity: sha512-313haTrX9prep1yWO7N6Xw4D6tvUJ0Xsx+YhCP+5YrrcIKoEw5Rtlg8R4PPzLqe6zibw6aJ+Eqq+y76Vx5BZkw==} + + 
'@cspell/dict-kotlin@1.1.0': + resolution: {integrity: sha512-vySaVw6atY7LdwvstQowSbdxjXG6jDhjkWVWSjg1XsUckyzH1JRHXe9VahZz1i7dpoFEUOWQrhIe5B9482UyJQ==} '@cspell/dict-latex@4.0.3': resolution: {integrity: sha512-2KXBt9fSpymYHxHfvhUpjUFyzrmN4c4P8mwIzweLyvqntBT3k0YGZJSriOdjfUjwSygrfEwiuPI1EMrvgrOMJw==} - '@cspell/dict-lorem-ipsum@4.0.3': - resolution: {integrity: sha512-WFpDi/PDYHXft6p0eCXuYnn7mzMEQLVeqpO+wHSUd+kz5ADusZ4cpslAA4wUZJstF1/1kMCQCZM6HLZic9bT8A==} + '@cspell/dict-lorem-ipsum@4.0.4': + resolution: {integrity: sha512-+4f7vtY4dp2b9N5fn0za/UR0kwFq2zDtA62JCbWHbpjvO9wukkbl4rZg4YudHbBgkl73HRnXFgCiwNhdIA1JPw==} - '@cspell/dict-lua@4.0.6': - resolution: {integrity: sha512-Jwvh1jmAd9b+SP9e1GkS2ACbqKKRo9E1f9GdjF/ijmooZuHU0hPyqvnhZzUAxO1egbnNjxS/J2T6iUtjAUK2KQ==} + '@cspell/dict-lua@4.0.7': + resolution: {integrity: sha512-Wbr7YSQw+cLHhTYTKV6cAljgMgcY+EUAxVIZW3ljKswEe4OLxnVJ7lPqZF5JKjlXdgCjbPSimsHqyAbC5pQN/Q==} - '@cspell/dict-makefile@1.0.3': - resolution: {integrity: sha512-R3U0DSpvTs6qdqfyBATnePj9Q/pypkje0Nj26mQJ8TOBQutCRAJbr2ZFAeDjgRx5EAJU/+8txiyVF97fbVRViw==} + '@cspell/dict-makefile@1.0.4': + resolution: {integrity: sha512-E4hG/c0ekPqUBvlkrVvzSoAA+SsDA9bLi4xSV3AXHTVru7Y2bVVGMPtpfF+fI3zTkww/jwinprcU1LSohI3ylw==} - '@cspell/dict-markdown@2.0.7': - resolution: {integrity: sha512-F9SGsSOokFn976DV4u/1eL4FtKQDSgJHSZ3+haPRU5ki6OEqojxKa8hhj4AUrtNFpmBaJx/WJ4YaEzWqG7hgqg==} + '@cspell/dict-markdown@2.0.9': + resolution: {integrity: sha512-j2e6Eg18BlTb1mMP1DkyRFMM/FLS7qiZjltpURzDckB57zDZbUyskOFdl4VX7jItZZEeY0fe22bSPOycgS1Z5A==} peerDependencies: - '@cspell/dict-css': ^4.0.16 - '@cspell/dict-html': ^4.0.10 + '@cspell/dict-css': ^4.0.17 + '@cspell/dict-html': ^4.0.11 '@cspell/dict-html-symbol-entities': ^4.0.3 - '@cspell/dict-typescript': ^3.1.11 + '@cspell/dict-typescript': ^3.2.0 + + '@cspell/dict-monkeyc@1.0.10': + resolution: {integrity: sha512-7RTGyKsTIIVqzbvOtAu6Z/lwwxjGRtY5RkKPlXKHEoEAgIXwfDxb5EkVwzGQwQr8hF/D3HrdYbRT8MFBfsueZw==} - '@cspell/dict-monkeyc@1.0.9': - 
resolution: {integrity: sha512-Jvf6g5xlB4+za3ThvenYKREXTEgzx5gMUSzrAxIiPleVG4hmRb/GBSoSjtkGaibN3XxGx5x809gSTYCA/IHCpA==} + '@cspell/dict-node@5.0.6': + resolution: {integrity: sha512-CEbhPCpxGvRNByGolSBTrXXW2rJA4bGqZuTx1KKO85mwR6aadeOmUE7xf/8jiCkXSy+qvr9aJeh+jlfXcsrziQ==} - '@cspell/dict-node@5.0.5': - resolution: {integrity: sha512-7NbCS2E8ZZRZwlLrh2sA0vAk9n1kcTUiRp/Nia8YvKaItGXLfxYqD2rMQ3HpB1kEutal6hQLVic3N2Yi1X7AaA==} + '@cspell/dict-npm@5.1.31': + resolution: {integrity: sha512-Oh9nrhgNV4UD1hlbgO3TFQqQRKziwc7qXKoQiC4oqOYIhMs2WL9Ezozku7FY1e7o5XbCIZX9nRH0ymNx/Rwj6w==} - '@cspell/dict-npm@5.1.14': - resolution: {integrity: sha512-7VV/rrRlxOwy5j0bpw6/Uci+nx/rwSgx45FJdeKq++nHsBx/nEXMFNODknm4Mi6i7t7uOVHExpifrR6w6xTWww==} + '@cspell/dict-php@4.0.14': + resolution: {integrity: sha512-7zur8pyncYZglxNmqsRycOZ6inpDoVd4yFfz1pQRe5xaRWMiK3Km4n0/X/1YMWhh3e3Sl/fQg5Axb2hlN68t1g==} - '@cspell/dict-php@4.0.13': - resolution: {integrity: sha512-P6sREMZkhElzz/HhXAjahnICYIqB/HSGp1EhZh+Y6IhvC15AzgtDP8B8VYCIsQof6rPF1SQrFwunxOv8H1e2eg==} + '@cspell/dict-powershell@5.0.14': + resolution: {integrity: sha512-ktjjvtkIUIYmj/SoGBYbr3/+CsRGNXGpvVANrY0wlm/IoGlGywhoTUDYN0IsGwI2b8Vktx3DZmQkfb3Wo38jBA==} - '@cspell/dict-powershell@5.0.13': - resolution: {integrity: sha512-0qdj0XZIPmb77nRTynKidRJKTU0Fl+10jyLbAhFTuBWKMypVY06EaYFnwhsgsws/7nNX8MTEQuewbl9bWFAbsg==} + '@cspell/dict-public-licenses@2.0.13': + resolution: {integrity: sha512-1Wdp/XH1ieim7CadXYE7YLnUlW0pULEjVl9WEeziZw3EKCAw8ZI8Ih44m4bEa5VNBLnuP5TfqC4iDautAleQzQ==} - '@cspell/dict-public-licenses@2.0.11': - resolution: {integrity: sha512-rR5KjRUSnVKdfs5G+gJ4oIvQvm8+NJ6cHWY2N+GE69/FSGWDOPHxulCzeGnQU/c6WWZMSimG9o49i9r//lUQyA==} + '@cspell/dict-python@4.2.16': + resolution: {integrity: sha512-LkQssFt1hPOWXIQiD8ScTkz/41RL7Ti0V/2ytUzEW82dc0atIEksrBg8MuOjWXktp0Dk5tDwRLgmIvhV3CFFOA==} - '@cspell/dict-python@4.2.12': - resolution: {integrity: sha512-U25eOFu+RE0aEcF2AsxZmq3Lic7y9zspJ9SzjrC0mfJz+yr3YmSCw4E0blMD3mZoNcf7H/vMshuKIY5AY36U+Q==} + 
'@cspell/dict-r@2.1.0': + resolution: {integrity: sha512-k2512wgGG0lTpTYH9w5Wwco+lAMf3Vz7mhqV8+OnalIE7muA0RSuD9tWBjiqLcX8zPvEJr4LdgxVju8Gk3OKyA==} - '@cspell/dict-r@2.0.4': - resolution: {integrity: sha512-cBpRsE/U0d9BRhiNRMLMH1PpWgw+N+1A2jumgt1if9nBGmQw4MUpg2u9I0xlFVhstTIdzXiLXMxP45cABuiUeQ==} + '@cspell/dict-ruby@5.0.8': + resolution: {integrity: sha512-ixuTneU0aH1cPQRbWJvtvOntMFfeQR2KxT8LuAv5jBKqQWIHSxzGlp+zX3SVyoeR0kOWiu64/O5Yn836A5yMcQ==} - '@cspell/dict-ruby@5.0.7': - resolution: {integrity: sha512-4/d0hcoPzi5Alk0FmcyqlzFW9lQnZh9j07MJzPcyVO62nYJJAGKaPZL2o4qHeCS/od/ctJC5AHRdoUm0ktsw6Q==} + '@cspell/dict-rust@4.0.11': + resolution: {integrity: sha512-OGWDEEzm8HlkSmtD8fV3pEcO2XBpzG2XYjgMCJCRwb2gRKvR+XIm6Dlhs04N/K2kU+iH8bvrqNpM8fS/BFl0uw==} - '@cspell/dict-rust@4.0.10': - resolution: {integrity: sha512-6o5C8566VGTTctgcwfF3Iy7314W0oMlFFSQOadQ0OEdJ9Z9ERX/PDimrzP3LGuOrvhtEFoK8pj+BLnunNwRNrw==} + '@cspell/dict-scala@5.0.7': + resolution: {integrity: sha512-yatpSDW/GwulzO3t7hB5peoWwzo+Y3qTc0pO24Jf6f88jsEeKmDeKkfgPbYuCgbE4jisGR4vs4+jfQZDIYmXPA==} - '@cspell/dict-scala@5.0.6': - resolution: {integrity: sha512-tl0YWAfjUVb4LyyE4JIMVE8DlLzb1ecHRmIWc4eT6nkyDqQgHKzdHsnusxFEFMVLIQomgSg0Zz6hJ5S1E4W4ww==} + '@cspell/dict-shell@1.1.0': + resolution: {integrity: sha512-D/xHXX7T37BJxNRf5JJHsvziFDvh23IF/KvkZXNSh8VqcRdod3BAz9VGHZf6VDqcZXr1VRqIYR3mQ8DSvs3AVQ==} - '@cspell/dict-software-terms@4.1.17': - resolution: {integrity: sha512-QORIk1R5DV8oOQ+oAlUWE7UomaJwUucqu2srrc2+PmkoI6R1fJwwg2uHCPBWlIb4PGDNEdXLv9BAD13H+0wytQ==} + '@cspell/dict-software-terms@5.0.2': + resolution: {integrity: sha512-aCzP+M0WXRLYXTriDMZygUe5s4jKyau/nCA6gBGt4EoHfXn+Ua/+DrW766oXOkkESIlqTBtRgb9gWwQvUdOXSQ==} - '@cspell/dict-sql@2.1.8': - resolution: {integrity: sha512-dJRE4JV1qmXTbbGm6WIcg1knmR6K5RXnQxF4XHs5HA3LAjc/zf77F95i5LC+guOGppVF6Hdl66S2UyxT+SAF3A==} + '@cspell/dict-sql@2.2.0': + resolution: {integrity: sha512-MUop+d1AHSzXpBvQgQkCiok8Ejzb+nrzyG16E8TvKL2MQeDwnIvMe3bv90eukP6E1HWb+V/MA/4pnq0pcJWKqQ==} - 
'@cspell/dict-svelte@1.0.5': - resolution: {integrity: sha512-sseHlcXOqWE4Ner9sg8KsjxwSJ2yssoJNqFHR9liWVbDV+m7kBiUtn2EB690TihzVsEmDr/0Yxrbb5Bniz70mA==} + '@cspell/dict-svelte@1.0.6': + resolution: {integrity: sha512-8LAJHSBdwHCoKCSy72PXXzz7ulGROD0rP1CQ0StOqXOOlTUeSFaJJlxNYjlONgd2c62XBQiN2wgLhtPN+1Zv7Q==} - '@cspell/dict-swift@2.0.4': - resolution: {integrity: sha512-CsFF0IFAbRtYNg0yZcdaYbADF5F3DsM8C4wHnZefQy8YcHP/qjAF/GdGfBFBLx+XSthYuBlo2b2XQVdz3cJZBw==} + '@cspell/dict-swift@2.0.5': + resolution: {integrity: sha512-3lGzDCwUmnrfckv3Q4eVSW3sK3cHqqHlPprFJZD4nAqt23ot7fic5ALR7J4joHpvDz36nHX34TgcbZNNZOC/JA==} - '@cspell/dict-terraform@1.0.6': - resolution: {integrity: sha512-Sqm5vGbXuI9hCFcr4w6xWf4Y25J9SdleE/IqfM6RySPnk8lISEmVdax4k6+Kinv9qaxyvnIbUUN4WFLWcBPQAg==} + '@cspell/dict-terraform@1.1.1': + resolution: {integrity: sha512-07KFDwCU7EnKl4hOZLsLKlj6Zceq/IsQ3LRWUyIjvGFfZHdoGtFdCp3ZPVgnFaAcd/DKv+WVkrOzUBSYqHopQQ==} - '@cspell/dict-typescript@3.1.11': - resolution: {integrity: sha512-FwvK5sKbwrVpdw0e9+1lVTl8FPoHYvfHRuQRQz2Ql5XkC0gwPPkpoyD1zYImjIyZRoYXk3yp9j8ss4iz7A7zoQ==} + '@cspell/dict-typescript@3.2.0': + resolution: {integrity: sha512-Pk3zNePLT8qg51l0M4g1ISowYAEGxTuNfZlgkU5SvHa9Cu7x/BWoyYq9Fvc3kAyoisCjRPyvWF4uRYrPitPDFw==} - '@cspell/dict-vue@3.0.3': - resolution: {integrity: sha512-akmYbrgAGumqk1xXALtDJcEcOMYBYMnkjpmGzH13Ozhq1mkPF4VgllFQlm1xYde+BUKNnzMgPEzxrL2qZllgYA==} + '@cspell/dict-vue@3.0.4': + resolution: {integrity: sha512-0dPtI0lwHcAgSiQFx8CzvqjdoXROcH+1LyqgROCpBgppommWpVhbQ0eubnKotFEXgpUCONVkeZJ6Ql8NbTEu+w==} - '@cspell/dynamic-import@8.16.0': - resolution: {integrity: sha512-FH+B5y71qfunagXiLSJhXP9h/Vwb1Z8Cc/hLmliGekw/Y8BuYknL86tMg9grXBYNmM0kifIv6ZesQl8Km/p/rA==} + '@cspell/dynamic-import@8.18.0': + resolution: {integrity: sha512-TPdY/x9l0DAWCSI8iXDEQSl0xlB9qSbEqIv3UYVpWqbQYCY7MdA15bmai8uKt08sZ8F9L6nYHPtbOGFExHvoSw==} engines: {node: '>=18.0'} - '@cspell/filetypes@8.16.0': - resolution: {integrity: 
sha512-u2Ub0uSwXFPJFvXhAO/0FZBj3sMr4CeYCiQwTUsdFRkRMFpbTc7Vf+a+aC2vIj6WcaWrYXrJy3NZF/yjqF6SGw==} + '@cspell/filetypes@8.18.0': + resolution: {integrity: sha512-Qd+Fc9CfkCm4Kufe/H8jCLe5px3PwiKmJgdiZ6FJ0i06MU+0XHZGmzWayrL+EoTqfbQA3jLkvnSgWwF0C6Ci6Q==} engines: {node: '>=18'} - '@cspell/strong-weak-map@8.16.0': - resolution: {integrity: sha512-R6N12wEIQpBk2uyni/FU1SFSIjP0uql7ynXVcF1ob8/JJeRoikssydi9Xq5J6ghMw+X50u35mFvg9BgWKz0d+g==} + '@cspell/strong-weak-map@8.18.0': + resolution: {integrity: sha512-u8j+1JsnzJv10c7KaGzCdp8mJ3IL0tJ601+ySdebqVL4VNVKE1OcEV+sYxMjrXbeXkPGlpSwr+yDKMW1WHaC7A==} engines: {node: '>=18'} - '@cspell/url@8.16.0': - resolution: {integrity: sha512-zW+6hAieD/FjysfjY4mVv7iHWWasBP3ldj6L+xy2p4Kuax1nug7uuJqMHlAVude/OywNwENG0rYaP/P9Pg4O+w==} + '@cspell/url@8.18.0': + resolution: {integrity: sha512-jbo66L7Y5WImty4o2s5sL6LwTSHS6XjZDKEUayqxILyNb5XHKRUinyII1/EpglFRi9n7G5w4t714/Aeg1Y90Vg==} engines: {node: '>=18.0'} '@nodelib/fs.scandir@2.1.5': @@ -245,86 +248,65 @@ packages: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} - '@textlint/ast-node-types@12.6.1': - resolution: {integrity: sha512-uzlJ+ZsCAyJm+lBi7j0UeBbj+Oy6w/VWoGJ3iHRHE5eZ8Z4iK66mq+PG/spupmbllLtz77OJbY89BYqgFyjXmA==} + '@sindresorhus/merge-streams@2.3.0': + resolution: {integrity: sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==} + engines: {node: '>=18'} + + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} - '@textlint/markdown-to-ast@12.6.1': - resolution: {integrity: sha512-T0HO+VrU9VbLRiEx/kH4+gwGMHNMIGkp0Pok+p0I33saOOLyhfGvwOKQgvt2qkxzQEV2L5MtGB8EnW4r5d3CqQ==} + '@types/katex@0.16.7': + resolution: {integrity: sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==} - '@types/mdast@3.0.15': - resolution: {integrity: 
sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==} + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} - anchor-markdown-header@0.6.0: - resolution: {integrity: sha512-v7HJMtE1X7wTpNFseRhxsY/pivP4uAJbidVhPT+yhz4i/vV1+qx371IXuV9V7bN6KjFtheLJxqaSm0Y/8neJTA==} - argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - array-buffer-byte-length@1.0.1: - resolution: {integrity: sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==} - engines: {node: '>= 0.4'} - array-timsort@1.0.3: resolution: {integrity: sha512-/+3GRL7dDAGEfM6TseQk/U+mi18TU2Ms9I3UlLdUMhz2hbvGNTKdj9xniwXfUqgYhHxRx0+8UnKkvlNwVU+cWQ==} - array-union@3.0.1: - resolution: {integrity: sha512-1OvF9IbWwaeiM9VhzYXVQacMibxpXOMYVNIvMtKRyX9SImBXpKcFr8XvFDeEslCyuH/t6KRt7HEO94AlP8Iatw==} - engines: {node: '>=12'} - - arraybuffer.prototype.slice@1.0.3: - resolution: {integrity: sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==} - engines: {node: '>= 0.4'} - - available-typed-arrays@1.0.7: - resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} - engines: {node: '>= 0.4'} - - bail@1.0.5: - resolution: {integrity: sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==} - braces@3.0.3: resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} - call-bind@1.0.7: - resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} - 
engines: {node: '>= 0.4'} - callsites@3.1.0: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} - ccount@1.1.0: - resolution: {integrity: sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==} - chalk-template@1.1.0: resolution: {integrity: sha512-T2VJbcDuZQ0Tb2EWwSotMPJjgpy1/tGee1BTpUNsGZ/qgNjV2t7Mvu+d4600U564nbLesN1x2dPL+xii174Ekg==} engines: {node: '>=14.16'} - chalk@5.3.0: - resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==} + chalk@5.4.1: + resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} - character-entities-legacy@1.1.4: - resolution: {integrity: sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==} + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} - character-entities@1.2.4: - resolution: {integrity: sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==} + character-entities@2.0.2: + resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} - character-reference-invalid@1.1.4: - resolution: {integrity: sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==} + character-reference-invalid@2.0.1: + resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==} clear-module@4.1.2: resolution: {integrity: sha512-LWAxzHqdHsAZlPlEyJ2Poz6AIs384mPeqLVCru2p0BrP9G/kVGuhNyZYClLO6cXlnuJjzC8xtsJIuMjKqLXoAw==} engines: {node: '>=8'} - commander@12.1.0: - resolution: {integrity: 
sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==} + commander@13.1.0: + resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} engines: {node: '>=18'} + commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + comment-json@4.2.5: resolution: {integrity: sha512-bKw/r35jR3HGt5PEPm1ljsQQGyCrR8sFGNiN5L+ykDHdpO8Smxkrkla9Yi6NkQyUrb8V54PGhfMs6NrIwtxtdw==} engines: {node: '>= 6'} @@ -332,59 +314,47 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} - cspell-config-lib@8.16.0: - resolution: {integrity: sha512-PGT6ohLtIYXYLIm+R5hTcTrF0dzj8e7WAUJSJe5WlV/7lrwVdwgWaliLcXtSSPmfxgczr6sndX9TMJ2IEmPrmg==} + cspell-config-lib@8.18.0: + resolution: {integrity: sha512-Y0hos8IS1rzmU9lTl6v1q6MBr6v9nhJy5IacZXSJhBSTHRYemsvICTnn+PtksUFgrEEqWusOdAsgBhYV0nlSuw==} engines: {node: '>=18'} - cspell-dictionary@8.16.0: - resolution: {integrity: sha512-Y3sN6ttLBKbu0dOLcduY641n5QP1srUvZkW4bOTnG455DbIZfilrP1El/2Hl0RS6hC8LN9PM4bsIm/2xgdbApA==} + cspell-dictionary@8.18.0: + resolution: {integrity: sha512-yf7anUDHYFPuQ53619BILYswm4E08NzyNPO1cF0GyqGe5ZTTHf/rCNYcuZHtQ7yKmQuC/K8/y2kEeLsqNa0p6A==} engines: {node: '>=18'} - cspell-gitignore@8.16.0: - resolution: {integrity: sha512-ODKe0ooyzYSBJkwgIVZSRIvzoZfT4tEbFt4fFDT88wPyyfX7xp7MAQhXy5KD1ocXH0WvYbdv37qzn2UbckrahA==} + cspell-gitignore@8.18.0: + resolution: {integrity: sha512-HYWAK7282o9CkcMwqC3w1wNjgae1v4CMgzF3ptpyBonjISkj1GdFGMno4Gu2uW43aKGTmyj4Fi9J94UZvzZa4w==} engines: {node: '>=18'} hasBin: true - cspell-glob@8.16.0: - resolution: {integrity: sha512-xJSXRHwfENCNFmjpVSEucXY8E3BrpSCA+TukmOYtLyaMKtn6EAwoCpEU7Oj2tZOjdivprPmQ74k4Dqb1RHjIVQ==} + cspell-glob@8.18.0: + resolution: {integrity: 
sha512-ox3ygu5+3tXR3+XRbYJy/z+YK1zo4TFQFkvUEr+aV8Ogyvgm7qE9uTaFz6krkkMLNG6l8EZ7mJtdn0ZsXF/WKQ==} engines: {node: '>=18'} - cspell-grammar@8.16.0: - resolution: {integrity: sha512-vvbJEkBqXocGH/H975RtkfMzVpNxNGMd0JCDd+NjbpeRyZceuChFw5Tie7kHteFY29SwZovub+Am3F4H1kmf9A==} + cspell-grammar@8.18.0: + resolution: {integrity: sha512-/h8gLULvH+P+8N/cmIx8M85sqlER6AyO/RoCVudfq7lTkFneXXKmCoHSA2YQbod9ZSjL+voivBokN2yjMR+XEA==} engines: {node: '>=18'} hasBin: true - cspell-io@8.16.0: - resolution: {integrity: sha512-WIK5uhPMjGsTAzm2/fGRbIdr7zWsMVG1fn8wNJYUiYELuyvzvLelfI1VG6szaFCGYqd6Uvgb/fS0uNbwGqCLAQ==} + cspell-io@8.18.0: + resolution: {integrity: sha512-W6CfXY5dlGTd6XWgHl4B2qLD/gla9TXDVdSo3ViCMJoVu82UQD8b4mir5RfHqXiMrz7ItDeUy9BxFN42VB2YcA==} engines: {node: '>=18'} - cspell-lib@8.16.0: - resolution: {integrity: sha512-fU8CfECyuhT12COIi4ViQu2bTkdqaa+05YSd2ZV8k8NA7lapPaMFnlooxdfcwwgZJfHeMhRVMzvQF1OhWmwGfA==} + cspell-lib@8.18.0: + resolution: {integrity: sha512-346CAY12pVk40FWnfPOwajKug61EeawW3QMtJE/f6rMYGAjGxGExhZnl6eR/KuCMt/w60kqQMSjGDw2zJjJqUw==} engines: {node: '>=18'} - cspell-trie-lib@8.16.0: - resolution: {integrity: sha512-Io1qqI0r4U9ewAWBLClFBBlxLeAoIi15PUGJi4Za1xrlgQJwRE8PMNIJNHKmPEIp78Iute3o/JyC2OfWlxl4Sw==} + cspell-trie-lib@8.18.0: + resolution: {integrity: sha512-zhrCAHyQ2uiGpFdp6E336/L2oDTh/0fM22VpGbkBS4uYKqG9jMy4eUZdSKS8Lg3St4YdGK14J7dv/PiMLqqxlw==} engines: {node: '>=18'} - cspell@8.16.0: - resolution: {integrity: sha512-U6Up/4nODE+Ca+zqwZXTgBioGuF2JQHLEUIuoRJkJzAZkIBYDqrMXM+zdSL9E39+xb9jAtr9kPAYJf1Eybgi9g==} + cspell@8.18.0: + resolution: {integrity: sha512-+6lJaR4zI/250vAR3qXwRj9O80Q4dHUuJWVXCZQV2L6HdF+s5ThS7+HYmE5zdf1YpPCtYJJ/6stkKsdUCQtkTA==} engines: {node: '>=18'} hasBin: true - data-view-buffer@1.0.1: - resolution: {integrity: sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==} - engines: {node: '>= 0.4'} - - data-view-byte-length@1.0.1: - resolution: {integrity: 
sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==} - engines: {node: '>= 0.4'} - - data-view-byte-offset@1.0.0: - resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} - engines: {node: '>= 0.4'} - - debug@4.3.7: - resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -392,107 +362,45 @@ packages: supports-color: optional: true - define-data-property@1.1.4: - resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} - engines: {node: '>= 0.4'} - - define-properties@1.2.1: - resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} - engines: {node: '>= 0.4'} - - dir-glob@3.0.1: - resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} - engines: {node: '>=8'} - - doctoc@2.2.1: - resolution: {integrity: sha512-qNJ1gsuo7hH40vlXTVVrADm6pdg30bns/Mo7Nv1SxuXSM1bwF9b4xQ40a6EFT/L1cI+Yylbyi8MPI4G4y7XJzQ==} - hasBin: true + decode-named-character-reference@1.1.0: + resolution: {integrity: sha512-Wy+JTSbFThEOXQIR2L6mxJvEs+veIzpmqD7ynWxMXGpnk3smkHQOp6forLdHsKpAMW9iJpaBBIxz285t1n1C3w==} - dom-serializer@1.4.1: - resolution: {integrity: sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==} - - domelementtype@2.3.0: - resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} - - domhandler@4.3.1: - resolution: {integrity: 
sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==} - engines: {node: '>= 4'} - - domutils@2.8.0: - resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} - - emoji-regex@10.1.0: - resolution: {integrity: sha512-xAEnNCT3w2Tg6MA7ly6QqYJvEoY1tm9iIjJ3yMKK9JPlWuRHAMoe5iETwQnx3M9TVbFMfsrBgWKR+IsmswwNjg==} - - entities@2.1.0: - resolution: {integrity: sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==} + dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} - entities@2.2.0: - resolution: {integrity: sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==} + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} - entities@3.0.1: - resolution: {integrity: sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==} + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} env-paths@3.0.0: resolution: {integrity: sha512-dtJUTepzMW3Lm/NPxRf3wP4642UWhjL2sQxc+ym2YMj1m/H2zDNQOlezafzkHwn6sMstjHTwG6iQQsctDW/b1A==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - es-abstract@1.23.5: - resolution: {integrity: sha512-vlmniQ0WNPwXqA0BnmwV3Ng7HxiGlh6r5U6JcTMNx8OilcAGqVJBHJcPjqOMaczU9fRuRK5Px2BdVyPRnKMMVQ==} - engines: {node: '>= 0.4'} - - es-define-property@1.0.0: - resolution: {integrity: sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==} - engines: {node: '>= 0.4'} - - es-errors@1.3.0: - resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} - 
engines: {node: '>= 0.4'} - - es-object-atoms@1.0.0: - resolution: {integrity: sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==} - engines: {node: '>= 0.4'} - - es-set-tostringtag@2.0.3: - resolution: {integrity: sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==} - engines: {node: '>= 0.4'} - - es-to-primitive@1.3.0: - resolution: {integrity: sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==} - engines: {node: '>= 0.4'} - - escape-string-regexp@4.0.0: - resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} - engines: {node: '>=10'} - esprima@4.0.1: resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} engines: {node: '>=4'} hasBin: true - extend@3.0.2: - resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} - - fast-equals@5.0.1: - resolution: {integrity: sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ==} + fast-equals@5.2.2: + resolution: {integrity: sha512-V7/RktU11J3I36Nwq2JnZEM7tNm17eBJz+u25qdxBZeCKiX6BkVSZQjwWIr+IobgnZy+ag73tTZgZi7tr0LrBw==} engines: {node: '>=6.0.0'} - fast-glob@3.3.2: - resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} engines: {node: '>=8.6.0'} fast-json-stable-stringify@2.1.0: resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} - fastq@1.17.1: - resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} - - fault@1.0.4: - 
resolution: {integrity: sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==} + fastq@1.19.1: + resolution: {integrity: sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==} - fdir@6.4.2: - resolution: {integrity: sha512-KnhMXsKSPZlAhp7+IjUkRZKPb4fUyccpDrdFXbi4QL1qkmFh9kVY09Yox+n4MaOb3lHZ1Tv829C3oaaXoMYPDQ==} + fdir@6.4.3: + resolution: {integrity: sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==} peerDependencies: picomatch: ^3 || ^4 peerDependenciesMeta: @@ -507,50 +415,21 @@ packages: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} - find-up-simple@1.0.0: - resolution: {integrity: sha512-q7Us7kcjj2VMePAa02hDAF6d+MzsdsAWEwYyOpwUtlerRBkOEPBCRZrAV4XfcSN8fHAgaD0hP7miwoay6DCprw==} - engines: {node: '>=18'} - flat-cache@5.0.0: resolution: {integrity: sha512-JrqFmyUl2PnPi1OvLyTVHnQvwQ0S+e6lGSwu8OkAZlSaNIZciTY2H/cOOROxsBA1m/LZNHDsqAgDZt6akWcjsQ==} engines: {node: '>=18'} - flatted@3.3.2: - resolution: {integrity: sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==} - - for-each@0.3.3: - resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} - - format@0.2.2: - resolution: {integrity: sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==} - engines: {node: '>=0.4.x'} - - function-bind@1.1.2: - resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} - - function.prototype.name@1.1.6: - resolution: {integrity: sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==} - engines: {node: '>= 0.4'} - - functions-have-names@1.2.3: - resolution: {integrity: 
sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} gensequence@7.0.0: resolution: {integrity: sha512-47Frx13aZh01afHJTB3zTtKIlFI6vWY+MYCN9Qpew6i52rfKjnhCF/l1YlC8UmEMvvntZZ6z4PiCcmyuedR2aQ==} engines: {node: '>=18'} - get-intrinsic@1.2.4: - resolution: {integrity: sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==} - engines: {node: '>= 0.4'} - get-stdin@9.0.0: resolution: {integrity: sha512-dVKBjfWisLAicarI2Sf+JuBE/DghV4UzNAVe9yhEJuzeREd3JhOTE9cUaJTeSa77fsbQUK3pcOpJfM59+VKZaA==} engines: {node: '>=12'} - get-symbol-description@1.0.2: - resolution: {integrity: sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==} - engines: {node: '>= 0.4'} - glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} @@ -559,52 +438,20 @@ packages: resolution: {integrity: sha512-wHTUcDUoZ1H5/0iVqEudYW4/kAlN5cZ3j/bXn0Dpbizl9iaUVeWSHqiOjsgk6OW2bkLclbBjzewBz6weQ1zA2Q==} engines: {node: '>=18'} - globalthis@1.0.4: - resolution: {integrity: sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==} - engines: {node: '>= 0.4'} - - globby@12.1.0: - resolution: {integrity: sha512-YULDaNwsoUZkRy9TWSY/M7Obh0abamTKoKzTfOI3uU+hfpX2FZqOq8LFDxsjYheF1RH7ITdArgbQnsNBFgcdBA==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - - gopd@1.0.1: - resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} - - has-bigints@1.0.2: - resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} + globby@14.0.2: + resolution: {integrity: 
sha512-s3Fq41ZVh7vbbe2PN3nrW7yC7U7MFVc5c98/iTl9c2GawNMKx/J648KQRW6WKkuU8GIbbh2IXfIRQjOZnXcTnw==} + engines: {node: '>=18'} has-own-prop@2.0.0: resolution: {integrity: sha512-Pq0h+hvsVm6dDEa8x82GnLSYHOzNDt7f0ddFa3FqcQlgzEiptPqL+XrOJNavjOzSYiYWIrgeVYYgGlLmnxwilQ==} engines: {node: '>=8'} - has-property-descriptors@1.0.2: - resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} - - has-proto@1.0.3: - resolution: {integrity: sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==} - engines: {node: '>= 0.4'} - - has-symbols@1.0.3: - resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} - engines: {node: '>= 0.4'} - - has-tostringtag@1.0.2: - resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} - engines: {node: '>= 0.4'} - - hasown@2.0.2: - resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} - engines: {node: '>= 0.4'} - - htmlparser2@7.2.0: - resolution: {integrity: sha512-H7MImA4MS6cw7nbyURtLPO1Tms7C5H602LRETv95z1MxO/7CP7rDVROehUYeYBUYEON94NXXDEPmZuq+hX4sog==} - ignore@5.3.2: resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} - import-fresh@3.3.0: - resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} engines: {node: '>=6'} import-meta-resolve@4.1.0: @@ -614,256 +461,157 @@ packages: resolution: {integrity: sha512-QQnnxNyfvmHFIsj7gkPcYymR8Jdw/o7mp5ZFihxn6h8Ci6fh3Dx4E1gPjpQEpIuPo9XVNY/ZUwh4BPMjGyL01g==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - 
internal-slot@1.0.7: - resolution: {integrity: sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==} - engines: {node: '>= 0.4'} - - is-alphabetical@1.0.4: - resolution: {integrity: sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==} - - is-alphanumerical@1.0.4: - resolution: {integrity: sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==} - - is-array-buffer@3.0.4: - resolution: {integrity: sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==} - engines: {node: '>= 0.4'} + is-alphabetical@2.0.1: + resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} - is-async-function@2.0.0: - resolution: {integrity: sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==} - engines: {node: '>= 0.4'} + is-alphanumerical@2.0.1: + resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} - is-bigint@1.0.4: - resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} - - is-boolean-object@1.1.2: - resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} - engines: {node: '>= 0.4'} - - is-buffer@2.0.5: - resolution: {integrity: sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} - engines: {node: '>=4'} - - is-callable@1.2.7: - resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} - engines: {node: '>= 0.4'} - - is-data-view@1.0.1: - resolution: {integrity: sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==} - engines: {node: '>= 0.4'} - - is-date-object@1.0.5: - 
resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} - engines: {node: '>= 0.4'} - - is-decimal@1.0.4: - resolution: {integrity: sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==} + is-decimal@2.0.1: + resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} - is-finalizationregistry@1.1.0: - resolution: {integrity: sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA==} - engines: {node: '>= 0.4'} - - is-generator-function@1.0.10: - resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==} - engines: {node: '>= 0.4'} - is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} - is-hexadecimal@1.0.4: - resolution: {integrity: sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==} - - is-map@2.0.3: - resolution: {integrity: sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==} - engines: {node: '>= 0.4'} - - is-negative-zero@2.0.3: - resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} - engines: {node: '>= 0.4'} - - is-number-object@1.0.7: - resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} - engines: {node: '>= 0.4'} + is-hexadecimal@2.0.1: + resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} is-number@7.0.0: resolution: 
{integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} - is-plain-obj@2.1.0: - resolution: {integrity: sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==} - engines: {node: '>=8'} - - is-regex@1.1.4: - resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} - engines: {node: '>= 0.4'} - - is-set@2.0.3: - resolution: {integrity: sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==} - engines: {node: '>= 0.4'} - - is-shared-array-buffer@1.0.3: - resolution: {integrity: sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==} - engines: {node: '>= 0.4'} - - is-string@1.0.7: - resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} - engines: {node: '>= 0.4'} - - is-symbol@1.0.4: - resolution: {integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==} - engines: {node: '>= 0.4'} - - is-typed-array@1.1.13: - resolution: {integrity: sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==} - engines: {node: '>= 0.4'} - - is-weakmap@2.0.2: - resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==} - engines: {node: '>= 0.4'} - - is-weakref@1.0.2: - resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} - - is-weakset@2.0.3: - resolution: {integrity: sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==} - engines: {node: '>= 0.4'} - - isarray@2.0.5: - resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} + 
js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true json-buffer@3.0.1: resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + jsonc-parser@3.3.1: + resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} + + katex@0.16.21: + resolution: {integrity: sha512-XvqR7FgOHtWupfMiigNzmh+MgUVmDGU2kXZm899ZkPfcuoPuFxyHmXsgATDpFZDAXCI8tvinaVcDo8PIIJSo4A==} + hasBin: true + keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} - linkify-it@3.0.3: - resolution: {integrity: sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==} + linkify-it@5.0.0: + resolution: {integrity: sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} - longest-streak@2.0.4: - resolution: {integrity: sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==} - - markdown-it@12.3.2: - resolution: {integrity: sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==} + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} hasBin: true - markdown-table@2.0.0: - resolution: {integrity: sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==} - - markdownlint-cli2-formatter-default@0.0.3: - resolution: {integrity: sha512-QEAJitT5eqX1SNboOD+SO/LNBpu4P4je8JlR02ug2cLQAqmIhh8IJnSK7AcaHBHhNADqdGydnPpQOpsNcEEqCw==} + markdownlint-cli2-formatter-default@0.0.5: + resolution: {integrity: sha512-4XKTwQ5m1+Txo2kuQ3Jgpo/KmnG+X90dWt4acufg6HVGadTUG5hzHF/wssp9b5MBYOMCnZ9RMPaU//uHsszF8Q==} peerDependencies: markdownlint-cli2: '>=0.0.4' 
- markdownlint-cli2@0.4.0: - resolution: {integrity: sha512-EcwP5tAbyzzL3ACI0L16LqbNctmh8wNX56T+aVvIxWyTAkwbYNx2V7IheRkXS3mE7R/pnaApZ/RSXcXuzRVPjg==} - engines: {node: '>=12'} + markdownlint-cli2@0.17.2: + resolution: {integrity: sha512-XH06ZOi8wCrtOSSj3p8y3yJzwgzYOSa7lglNyS3fP05JPRzRGyjauBb5UvlLUSCGysMmULS1moxdRHHudV+g/Q==} + engines: {node: '>=18'} hasBin: true - markdownlint-rule-helpers@0.16.0: - resolution: {integrity: sha512-oEacRUVeTJ5D5hW1UYd2qExYI0oELdYK72k1TKGvIeYJIbqQWAz476NAc7LNixSySUhcNl++d02DvX0ccDk9/w==} + markdownlint@0.37.4: + resolution: {integrity: sha512-u00joA/syf3VhWh6/ybVFkib5Zpj2e5KB/cfCei8fkSRuums6nyisTWGqjTWIOFoFwuXoTBQQiqlB4qFKp8ncQ==} + engines: {node: '>=18'} - markdownlint@0.25.1: - resolution: {integrity: sha512-AG7UkLzNa1fxiOv5B+owPsPhtM4D6DoODhsJgiaNg1xowXovrYgOnLqAgOOFQpWOlHFVQUzjMY5ypNNTeov92g==} - engines: {node: '>=12'} + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} - mdast-util-find-and-replace@1.1.1: - resolution: {integrity: sha512-9cKl33Y21lyckGzpSmEQnIDjEfeeWelN5s1kUW1LwdB0Fkuq2u+4GdqcGEygYxJE8GVqCl0741bYXHgamfWAZA==} + micromark-core-commonmark@2.0.2: + resolution: {integrity: sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w==} - mdast-util-footnote@0.1.7: - resolution: {integrity: sha512-QxNdO8qSxqbO2e3m09KwDKfWiLgqyCurdWTQ198NpbZ2hxntdc+VKS4fDJCmNWbAroUdYnSthu+XbZ8ovh8C3w==} + micromark-extension-directive@3.0.2: + resolution: {integrity: sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==} - mdast-util-from-markdown@0.8.5: - resolution: {integrity: sha512-2hkTXtYYnr+NubD/g6KGBS/0mFmBcifAsI0yIWRiRo0PjVs6SSOSOdtzbp6kSGnShDN6G5aWZpKQ2lWRy27mWQ==} + 
micromark-extension-gfm-autolink-literal@2.1.0: + resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} - mdast-util-frontmatter@0.2.0: - resolution: {integrity: sha512-FHKL4w4S5fdt1KjJCwB0178WJ0evnyyQr5kXTM3wrOVpytD0hrkvd+AOOjU9Td8onOejCkmZ+HQRT3CZ3coHHQ==} + micromark-extension-gfm-footnote@2.1.0: + resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==} - mdast-util-gfm-autolink-literal@0.1.3: - resolution: {integrity: sha512-GjmLjWrXg1wqMIO9+ZsRik/s7PLwTaeCHVB7vRxUwLntZc8mzmTsLVr6HW1yLokcnhfURsn5zmSVdi3/xWWu1A==} + micromark-extension-gfm-table@2.1.0: + resolution: {integrity: sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==} - mdast-util-gfm-strikethrough@0.2.3: - resolution: {integrity: sha512-5OQLXpt6qdbttcDG/UxYY7Yjj3e8P7X16LzvpX8pIQPYJ/C2Z1qFGMmcw+1PZMUM3Z8wt8NRfYTvCni93mgsgA==} + micromark-extension-math@3.1.0: + resolution: {integrity: sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==} - mdast-util-gfm-table@0.1.6: - resolution: {integrity: sha512-j4yDxQ66AJSBwGkbpFEp9uG/LS1tZV3P33fN1gkyRB2LoRL+RR3f76m0HPHaby6F4Z5xr9Fv1URmATlRRUIpRQ==} + micromark-factory-destination@2.0.1: + resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} - mdast-util-gfm-task-list-item@0.1.6: - resolution: {integrity: sha512-/d51FFIfPsSmCIRNp7E6pozM9z1GYPIkSy1urQ8s/o4TC22BZ7DqfHFWiqBD23bc7J3vV1Fc9O4QIHBlfuit8A==} + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} - mdast-util-gfm@0.1.2: - resolution: {integrity: sha512-NNkhDx/qYcuOWB7xHUGWZYVXvjPFFd6afg6/e2g+SV4r9q5XUcCbV4Wfa3DLYIiD+xAEZc6K4MGaE/m0KDcPwQ==} + micromark-factory-space@2.0.1: + resolution: {integrity: 
sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} - mdast-util-to-markdown@0.6.5: - resolution: {integrity: sha512-XeV9sDE7ZlOQvs45C9UKMtfTcctcaj/pGwH8YLbMHoMOXNNCn2LsqVQOqrF1+/NU8lKDAqozme9SCXWyo9oAcQ==} + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} - mdast-util-to-string@2.0.0: - resolution: {integrity: sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==} + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} - mdurl@1.0.1: - resolution: {integrity: sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==} + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} - merge2@1.4.1: - resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} - engines: {node: '>= 8'} + micromark-util-chunked@2.0.1: + resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} - micromark-extension-footnote@0.3.2: - resolution: {integrity: sha512-gr/BeIxbIWQoUm02cIfK7mdMZ/fbroRpLsck4kvFtjbzP4yi+OPVbnukTc/zy0i7spC2xYE/dbX1Sur8BEDJsQ==} + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} - micromark-extension-frontmatter@0.2.2: - resolution: {integrity: sha512-q6nPLFCMTLtfsctAuS0Xh4vaolxSFUWUWR6PZSrXXiRy+SANGllpcqdXFv2z07l0Xz/6Hl40hK0ffNCJPH2n1A==} + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} - 
micromark-extension-gfm-autolink-literal@0.5.7: - resolution: {integrity: sha512-ePiDGH0/lhcngCe8FtH4ARFoxKTUelMp4L7Gg2pujYD5CSMb9PbblnyL+AAMud/SNMyusbS2XDSiPIRcQoNFAw==} + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} - micromark-extension-gfm-strikethrough@0.6.5: - resolution: {integrity: sha512-PpOKlgokpQRwUesRwWEp+fHjGGkZEejj83k9gU5iXCbDG+XBA92BqnRKYJdfqfkrRcZRgGuPuXb7DaK/DmxOhw==} + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} - micromark-extension-gfm-table@0.4.3: - resolution: {integrity: sha512-hVGvESPq0fk6ALWtomcwmgLvH8ZSVpcPjzi0AjPclB9FsVRgMtGZkUcpE0zgjOCFAznKepF4z3hX8z6e3HODdA==} + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} - micromark-extension-gfm-tagfilter@0.3.0: - resolution: {integrity: sha512-9GU0xBatryXifL//FJH+tAZ6i240xQuFrSL7mYi8f4oZSbc+NvXjkrHemeYP0+L4ZUT+Ptz3b95zhUZnMtoi/Q==} + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} - micromark-extension-gfm-task-list-item@0.3.3: - resolution: {integrity: sha512-0zvM5iSLKrc/NQl84pZSjGo66aTGd57C1idmlWmE87lkMcXrTxg1uXa/nXomxJytoje9trP0NDLvw4bZ/Z/XCQ==} + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} - micromark-extension-gfm@0.3.3: - resolution: {integrity: sha512-oVN4zv5/tAIA+l3GbMi7lWeYpJ14oQyJ3uEim20ktYFAcfX1x3LNlFGGlmrZHt7u9YlKExmyJdDGaTt6cMSR/A==} + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} - micromark@2.11.4: 
- resolution: {integrity: sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA==} + micromark-util-subtokenize@2.1.0: + resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} - micromatch@4.0.4: - resolution: {integrity: sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==} - engines: {node: '>=8.6'} + micromark-util-symbol@2.0.1: + resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + + micromark-util-types@2.0.1: + resolution: {integrity: sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ==} + + micromark@4.0.1: + resolution: {integrity: sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw==} micromatch@4.0.8: resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} - minimist@1.2.8: - resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} - ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - object-inspect@1.13.3: - resolution: {integrity: sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==} - engines: {node: '>= 0.4'} - - object-keys@1.1.1: - resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} - engines: {node: '>= 0.4'} - - object.assign@4.1.5: - resolution: {integrity: sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==} - engines: {node: '>= 0.4'} - parent-module@1.0.1: resolution: {integrity: 
sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} @@ -872,12 +620,12 @@ packages: resolution: {integrity: sha512-uo0Z9JJeWzv8BG+tRcapBKNJ0dro9cLyczGzulS6EfeyAdeC9sbojtW6XwvYxJkEne9En+J2XEl4zyglVeIwFg==} engines: {node: '>=8'} - parse-entities@2.0.0: - resolution: {integrity: sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==} + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} - path-type@4.0.0: - resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} - engines: {node: '>=8'} + path-type@5.0.0: + resolution: {integrity: sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==} + engines: {node: '>=12'} picomatch@2.3.1: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} @@ -887,33 +635,13 @@ packages: resolution: {integrity: sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==} engines: {node: '>=12'} - possible-typed-array-names@1.0.0: - resolution: {integrity: sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==} - engines: {node: '>= 0.4'} + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} queue-microtask@1.2.3: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} - reflect.getprototypeof@1.0.7: - resolution: {integrity: sha512-bMvFGIUKlc/eSfXNX+aZ+EL95/EgZzuwA0OBPTbZZDEJw/0AkentjMuM1oiRfwHrshqk4RzdgiTg5CcDalXN5g==} - engines: {node: '>= 0.4'} - - regexp.prototype.flags@1.5.3: - resolution: {integrity: 
sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ==} - engines: {node: '>= 0.4'} - - remark-footnotes@3.0.0: - resolution: {integrity: sha512-ZssAvH9FjGYlJ/PBVKdSmfyPc3Cz4rTWgZLI4iE/SX8Nt5l3o3oEjv3wwG5VD7xOjktzdwp5coac+kJV9l4jgg==} - - remark-frontmatter@3.0.0: - resolution: {integrity: sha512-mSuDd3svCHs+2PyO29h7iijIZx4plX0fheacJcAoYAASfgzgVIcXGYSq9GFyYocFLftQs8IOmmkgtOovs6d4oA==} - - remark-gfm@1.0.0: - resolution: {integrity: sha512-KfexHJCiqvrdBZVbQ6RopMZGwaXz6wFJEfByIuEwGf0arvITHjiKKZ1dpXujjH9KZdm1//XJQwgfnJ3lmXaDPA==} - - remark-parse@9.0.0: - resolution: {integrity: sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw==} - repeat-string@1.6.1: resolution: {integrity: sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==} engines: {node: '>=0.10'} @@ -926,361 +654,264 @@ packages: resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} engines: {node: '>=8'} - reusify@1.0.4: - resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + reusify@1.1.0: + resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} - safe-array-concat@1.1.2: - resolution: {integrity: sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==} - engines: {node: '>=0.4'} - - safe-regex-test@1.0.3: - resolution: {integrity: sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==} - engines: {node: '>= 0.4'} - - semver@7.6.3: - resolution: {integrity: 
sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} engines: {node: '>=10'} hasBin: true - set-function-length@1.2.2: - resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} - engines: {node: '>= 0.4'} - - set-function-name@2.0.2: - resolution: {integrity: sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==} - engines: {node: '>= 0.4'} - - side-channel@1.0.6: - resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} - engines: {node: '>= 0.4'} - - slash@4.0.0: - resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} - engines: {node: '>=12'} - - string.prototype.trim@1.2.9: - resolution: {integrity: sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==} - engines: {node: '>= 0.4'} - - string.prototype.trimend@1.0.8: - resolution: {integrity: sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==} - - string.prototype.trimstart@1.0.8: - resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==} - engines: {node: '>= 0.4'} - - strip-json-comments@4.0.0: - resolution: {integrity: sha512-LzWcbfMbAsEDTRmhjWIioe8GcDRl0fa35YMXFoJKDdiD/quGFmjJjdgPjFJJNwCMaLyQqFIDqCdHD2V4HfLgYA==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + slash@5.1.0: + resolution: {integrity: sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==} + engines: {node: '>=14.16'} - tinyglobby@0.2.10: - resolution: {integrity: 
sha512-Zc+8eJlFMvgatPZTl6A9L/yht8QqdmUNtURHaKZLmKBE12hNPSrqNkUp2cs3M/UKmNVVAMFQYSjYIVHDjW5zew==} + tinyglobby@0.2.12: + resolution: {integrity: sha512-qkf4trmKSIiMTs/E63cxH+ojC2unam7rJ0WrauAzpT3ECNTxGRMlaXxVbfxMUC/w0LaYk6jQ4y/nGR9uBO3tww==} engines: {node: '>=12.0.0'} to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} - traverse@0.6.10: - resolution: {integrity: sha512-hN4uFRxbK+PX56DxYiGHsTn2dME3TVr9vbNqlQGcGcPhJAn+tdP126iA+TArMpI4YSgnTkMWyoLl5bf81Hi5TA==} - engines: {node: '>= 0.4'} - - trough@1.0.5: - resolution: {integrity: sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==} - - typed-array-buffer@1.0.2: - resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} - engines: {node: '>= 0.4'} - - typed-array-byte-length@1.0.1: - resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} - engines: {node: '>= 0.4'} - - typed-array-byte-offset@1.0.3: - resolution: {integrity: sha512-GsvTyUHTriq6o/bHcTd0vM7OQ9JEdlvluu9YISaA7+KzDzPaIzEeDFNkTfhdE3MYcNhNi0vq/LlegYgIs5yPAw==} - engines: {node: '>= 0.4'} - - typed-array-length@1.0.7: - resolution: {integrity: sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==} - engines: {node: '>= 0.4'} + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} - typedarray.prototype.slice@1.0.3: - resolution: {integrity: sha512-8WbVAQAUlENo1q3c3zZYuy5k9VzBQvp8AX9WOtbvyWlLM1v5JaSRmjubLjzHF4JFtptjH/5c/i95yaElvcjC0A==} - engines: {node: '>= 0.4'} - - uc.micro@1.0.6: - resolution: {integrity: sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==} - - unbox-primitive@1.0.2: - resolution: 
{integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} - - underscore@1.13.7: - resolution: {integrity: sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g==} - - unified@9.2.2: - resolution: {integrity: sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==} - - unist-util-is@4.1.0: - resolution: {integrity: sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==} - - unist-util-stringify-position@2.0.3: - resolution: {integrity: sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==} - - unist-util-visit-parents@3.1.1: - resolution: {integrity: sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==} - - update-section@0.3.3: - resolution: {integrity: sha512-BpRZMZpgXLuTiKeiu7kK0nIPwGdyrqrs6EDSaXtjD/aQ2T+qVo9a5hRC3HN3iJjCMxNT/VxoLGQ7E/OzE5ucnw==} - - vfile-message@2.0.4: - resolution: {integrity: sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==} - - vfile@4.2.1: - resolution: {integrity: sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==} + unicorn-magic@0.1.0: + resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} + engines: {node: '>=18'} vscode-languageserver-textdocument@1.0.12: resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} - vscode-uri@3.0.8: - resolution: {integrity: sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==} - - which-boxed-primitive@1.0.2: - resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} - - which-builtin-type@1.2.0: - resolution: {integrity: 
sha512-I+qLGQ/vucCby4tf5HsLmGueEla4ZhwTBSqaooS+Y0BuxN4Cp+okmGuV+8mXZ84KDI9BA+oklo+RzKg0ONdSUA==} - engines: {node: '>= 0.4'} - - which-collection@1.0.2: - resolution: {integrity: sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==} - engines: {node: '>= 0.4'} - - which-typed-array@1.1.15: - resolution: {integrity: sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==} - engines: {node: '>= 0.4'} + vscode-uri@3.1.0: + resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} xdg-basedir@5.1.0: resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} engines: {node: '>=12'} - yaml@1.10.2: - resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} - engines: {node: '>= 6'} - - yaml@2.6.1: - resolution: {integrity: sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg==} + yaml@2.7.0: + resolution: {integrity: sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==} engines: {node: '>= 14'} hasBin: true - zwitch@1.0.5: - resolution: {integrity: sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==} - snapshots: - '@cspell/cspell-bundled-dicts@8.16.0': - dependencies: - '@cspell/dict-ada': 4.0.5 - '@cspell/dict-al': 1.0.3 - '@cspell/dict-aws': 4.0.7 - '@cspell/dict-bash': 4.1.8 - '@cspell/dict-companies': 3.1.7 - '@cspell/dict-cpp': 6.0.2 - '@cspell/dict-cryptocurrencies': 5.0.3 - '@cspell/dict-csharp': 4.0.5 - '@cspell/dict-css': 4.0.16 - '@cspell/dict-dart': 2.2.4 - '@cspell/dict-django': 4.1.3 - '@cspell/dict-docker': 1.1.11 - '@cspell/dict-dotnet': 5.0.8 - '@cspell/dict-elixir': 4.0.6 - '@cspell/dict-en-common-misspellings': 2.0.7 + '@cspell/cspell-bundled-dicts@8.18.0': + 
dependencies: + '@cspell/dict-ada': 4.1.0 + '@cspell/dict-al': 1.1.0 + '@cspell/dict-aws': 4.0.9 + '@cspell/dict-bash': 4.2.0 + '@cspell/dict-companies': 3.1.14 + '@cspell/dict-cpp': 6.0.6 + '@cspell/dict-cryptocurrencies': 5.0.4 + '@cspell/dict-csharp': 4.0.6 + '@cspell/dict-css': 4.0.17 + '@cspell/dict-dart': 2.3.0 + '@cspell/dict-data-science': 2.0.7 + '@cspell/dict-django': 4.1.4 + '@cspell/dict-docker': 1.1.12 + '@cspell/dict-dotnet': 5.0.9 + '@cspell/dict-elixir': 4.0.7 + '@cspell/dict-en-common-misspellings': 2.0.10 '@cspell/dict-en-gb': 1.1.33 - '@cspell/dict-en_us': 4.3.28 - '@cspell/dict-filetypes': 3.0.8 - '@cspell/dict-flutter': 1.0.3 - '@cspell/dict-fonts': 4.0.3 - '@cspell/dict-fsharp': 1.0.4 - '@cspell/dict-fullstack': 3.2.3 - '@cspell/dict-gaming-terms': 1.0.8 - '@cspell/dict-git': 3.0.3 - '@cspell/dict-golang': 6.0.17 - '@cspell/dict-google': 1.0.4 - '@cspell/dict-haskell': 4.0.4 - '@cspell/dict-html': 4.0.10 + '@cspell/dict-en_us': 4.3.35 + '@cspell/dict-filetypes': 3.0.11 + '@cspell/dict-flutter': 1.1.0 + '@cspell/dict-fonts': 4.0.4 + '@cspell/dict-fsharp': 1.1.0 + '@cspell/dict-fullstack': 3.2.6 + '@cspell/dict-gaming-terms': 1.1.0 + '@cspell/dict-git': 3.0.4 + '@cspell/dict-golang': 6.0.19 + '@cspell/dict-google': 1.0.8 + '@cspell/dict-haskell': 4.0.5 + '@cspell/dict-html': 4.0.11 '@cspell/dict-html-symbol-entities': 4.0.3 - '@cspell/dict-java': 5.0.10 - '@cspell/dict-julia': 1.0.4 - '@cspell/dict-k8s': 1.0.9 + '@cspell/dict-java': 5.0.11 + '@cspell/dict-julia': 1.1.0 + '@cspell/dict-k8s': 1.0.10 + '@cspell/dict-kotlin': 1.1.0 '@cspell/dict-latex': 4.0.3 - '@cspell/dict-lorem-ipsum': 4.0.3 - '@cspell/dict-lua': 4.0.6 - '@cspell/dict-makefile': 1.0.3 - '@cspell/dict-markdown': 2.0.7(@cspell/dict-css@4.0.16)(@cspell/dict-html-symbol-entities@4.0.3)(@cspell/dict-html@4.0.10)(@cspell/dict-typescript@3.1.11) - '@cspell/dict-monkeyc': 1.0.9 - '@cspell/dict-node': 5.0.5 - '@cspell/dict-npm': 5.1.14 - '@cspell/dict-php': 4.0.13 - 
'@cspell/dict-powershell': 5.0.13 - '@cspell/dict-public-licenses': 2.0.11 - '@cspell/dict-python': 4.2.12 - '@cspell/dict-r': 2.0.4 - '@cspell/dict-ruby': 5.0.7 - '@cspell/dict-rust': 4.0.10 - '@cspell/dict-scala': 5.0.6 - '@cspell/dict-software-terms': 4.1.17 - '@cspell/dict-sql': 2.1.8 - '@cspell/dict-svelte': 1.0.5 - '@cspell/dict-swift': 2.0.4 - '@cspell/dict-terraform': 1.0.6 - '@cspell/dict-typescript': 3.1.11 - '@cspell/dict-vue': 3.0.3 - - '@cspell/cspell-json-reporter@8.16.0': - dependencies: - '@cspell/cspell-types': 8.16.0 - - '@cspell/cspell-pipe@8.16.0': {} - - '@cspell/cspell-resolver@8.16.0': + '@cspell/dict-lorem-ipsum': 4.0.4 + '@cspell/dict-lua': 4.0.7 + '@cspell/dict-makefile': 1.0.4 + '@cspell/dict-markdown': 2.0.9(@cspell/dict-css@4.0.17)(@cspell/dict-html-symbol-entities@4.0.3)(@cspell/dict-html@4.0.11)(@cspell/dict-typescript@3.2.0) + '@cspell/dict-monkeyc': 1.0.10 + '@cspell/dict-node': 5.0.6 + '@cspell/dict-npm': 5.1.31 + '@cspell/dict-php': 4.0.14 + '@cspell/dict-powershell': 5.0.14 + '@cspell/dict-public-licenses': 2.0.13 + '@cspell/dict-python': 4.2.16 + '@cspell/dict-r': 2.1.0 + '@cspell/dict-ruby': 5.0.8 + '@cspell/dict-rust': 4.0.11 + '@cspell/dict-scala': 5.0.7 + '@cspell/dict-shell': 1.1.0 + '@cspell/dict-software-terms': 5.0.2 + '@cspell/dict-sql': 2.2.0 + '@cspell/dict-svelte': 1.0.6 + '@cspell/dict-swift': 2.0.5 + '@cspell/dict-terraform': 1.1.1 + '@cspell/dict-typescript': 3.2.0 + '@cspell/dict-vue': 3.0.4 + + '@cspell/cspell-json-reporter@8.18.0': + dependencies: + '@cspell/cspell-types': 8.18.0 + + '@cspell/cspell-pipe@8.18.0': {} + + '@cspell/cspell-resolver@8.18.0': dependencies: global-directory: 4.0.1 - '@cspell/cspell-service-bus@8.16.0': {} + '@cspell/cspell-service-bus@8.18.0': {} - '@cspell/cspell-types@8.16.0': {} + '@cspell/cspell-types@8.18.0': {} - '@cspell/dict-ada@4.0.5': {} + '@cspell/dict-ada@4.1.0': {} - '@cspell/dict-al@1.0.3': {} + '@cspell/dict-al@1.1.0': {} - '@cspell/dict-aws@4.0.7': {} + 
'@cspell/dict-aws@4.0.9': {} - '@cspell/dict-bash@4.1.8': {} + '@cspell/dict-bash@4.2.0': + dependencies: + '@cspell/dict-shell': 1.1.0 - '@cspell/dict-companies@3.1.7': {} + '@cspell/dict-companies@3.1.14': {} - '@cspell/dict-cpp@6.0.2': {} + '@cspell/dict-cpp@6.0.6': {} - '@cspell/dict-cryptocurrencies@5.0.3': {} + '@cspell/dict-cryptocurrencies@5.0.4': {} - '@cspell/dict-csharp@4.0.5': {} + '@cspell/dict-csharp@4.0.6': {} - '@cspell/dict-css@4.0.16': {} + '@cspell/dict-css@4.0.17': {} - '@cspell/dict-dart@2.2.4': {} + '@cspell/dict-dart@2.3.0': {} - '@cspell/dict-data-science@2.0.5': {} + '@cspell/dict-data-science@2.0.7': {} - '@cspell/dict-django@4.1.3': {} + '@cspell/dict-django@4.1.4': {} - '@cspell/dict-docker@1.1.11': {} + '@cspell/dict-docker@1.1.12': {} - '@cspell/dict-dotnet@5.0.8': {} + '@cspell/dict-dotnet@5.0.9': {} - '@cspell/dict-elixir@4.0.6': {} + '@cspell/dict-elixir@4.0.7': {} - '@cspell/dict-en-common-misspellings@2.0.7': {} + '@cspell/dict-en-common-misspellings@2.0.10': {} '@cspell/dict-en-gb@1.1.33': {} - '@cspell/dict-en_us@4.3.28': {} + '@cspell/dict-en_us@4.3.35': {} - '@cspell/dict-filetypes@3.0.8': {} + '@cspell/dict-filetypes@3.0.11': {} - '@cspell/dict-flutter@1.0.3': {} + '@cspell/dict-flutter@1.1.0': {} - '@cspell/dict-fonts@4.0.3': {} + '@cspell/dict-fonts@4.0.4': {} - '@cspell/dict-fsharp@1.0.4': {} + '@cspell/dict-fsharp@1.1.0': {} - '@cspell/dict-fullstack@3.2.3': {} + '@cspell/dict-fullstack@3.2.6': {} - '@cspell/dict-gaming-terms@1.0.8': {} + '@cspell/dict-gaming-terms@1.1.0': {} - '@cspell/dict-git@3.0.3': {} + '@cspell/dict-git@3.0.4': {} - '@cspell/dict-golang@6.0.17': {} + '@cspell/dict-golang@6.0.19': {} - '@cspell/dict-google@1.0.4': {} + '@cspell/dict-google@1.0.8': {} - '@cspell/dict-haskell@4.0.4': {} + '@cspell/dict-haskell@4.0.5': {} '@cspell/dict-html-symbol-entities@4.0.3': {} - '@cspell/dict-html@4.0.10': {} + '@cspell/dict-html@4.0.11': {} - '@cspell/dict-java@5.0.10': {} + '@cspell/dict-java@5.0.11': {} - 
'@cspell/dict-julia@1.0.4': {} + '@cspell/dict-julia@1.1.0': {} - '@cspell/dict-k8s@1.0.9': {} + '@cspell/dict-k8s@1.0.10': {} + + '@cspell/dict-kotlin@1.1.0': {} '@cspell/dict-latex@4.0.3': {} - '@cspell/dict-lorem-ipsum@4.0.3': {} + '@cspell/dict-lorem-ipsum@4.0.4': {} - '@cspell/dict-lua@4.0.6': {} + '@cspell/dict-lua@4.0.7': {} - '@cspell/dict-makefile@1.0.3': {} + '@cspell/dict-makefile@1.0.4': {} - '@cspell/dict-markdown@2.0.7(@cspell/dict-css@4.0.16)(@cspell/dict-html-symbol-entities@4.0.3)(@cspell/dict-html@4.0.10)(@cspell/dict-typescript@3.1.11)': + '@cspell/dict-markdown@2.0.9(@cspell/dict-css@4.0.17)(@cspell/dict-html-symbol-entities@4.0.3)(@cspell/dict-html@4.0.11)(@cspell/dict-typescript@3.2.0)': dependencies: - '@cspell/dict-css': 4.0.16 - '@cspell/dict-html': 4.0.10 + '@cspell/dict-css': 4.0.17 + '@cspell/dict-html': 4.0.11 '@cspell/dict-html-symbol-entities': 4.0.3 - '@cspell/dict-typescript': 3.1.11 + '@cspell/dict-typescript': 3.2.0 - '@cspell/dict-monkeyc@1.0.9': {} + '@cspell/dict-monkeyc@1.0.10': {} - '@cspell/dict-node@5.0.5': {} + '@cspell/dict-node@5.0.6': {} - '@cspell/dict-npm@5.1.14': {} + '@cspell/dict-npm@5.1.31': {} - '@cspell/dict-php@4.0.13': {} + '@cspell/dict-php@4.0.14': {} - '@cspell/dict-powershell@5.0.13': {} + '@cspell/dict-powershell@5.0.14': {} - '@cspell/dict-public-licenses@2.0.11': {} + '@cspell/dict-public-licenses@2.0.13': {} - '@cspell/dict-python@4.2.12': + '@cspell/dict-python@4.2.16': dependencies: - '@cspell/dict-data-science': 2.0.5 + '@cspell/dict-data-science': 2.0.7 + + '@cspell/dict-r@2.1.0': {} - '@cspell/dict-r@2.0.4': {} + '@cspell/dict-ruby@5.0.8': {} - '@cspell/dict-ruby@5.0.7': {} + '@cspell/dict-rust@4.0.11': {} - '@cspell/dict-rust@4.0.10': {} + '@cspell/dict-scala@5.0.7': {} - '@cspell/dict-scala@5.0.6': {} + '@cspell/dict-shell@1.1.0': {} - '@cspell/dict-software-terms@4.1.17': {} + '@cspell/dict-software-terms@5.0.2': {} - '@cspell/dict-sql@2.1.8': {} + '@cspell/dict-sql@2.2.0': {} - 
'@cspell/dict-svelte@1.0.5': {} + '@cspell/dict-svelte@1.0.6': {} - '@cspell/dict-swift@2.0.4': {} + '@cspell/dict-swift@2.0.5': {} - '@cspell/dict-terraform@1.0.6': {} + '@cspell/dict-terraform@1.1.1': {} - '@cspell/dict-typescript@3.1.11': {} + '@cspell/dict-typescript@3.2.0': {} - '@cspell/dict-vue@3.0.3': {} + '@cspell/dict-vue@3.0.4': {} - '@cspell/dynamic-import@8.16.0': + '@cspell/dynamic-import@8.18.0': dependencies: + '@cspell/url': 8.18.0 import-meta-resolve: 4.1.0 - '@cspell/filetypes@8.16.0': {} + '@cspell/filetypes@8.18.0': {} - '@cspell/strong-weak-map@8.16.0': {} + '@cspell/strong-weak-map@8.18.0': {} - '@cspell/url@8.16.0': {} + '@cspell/url@8.18.0': {} '@nodelib/fs.scandir@2.1.5': dependencies: @@ -1292,96 +923,50 @@ snapshots: '@nodelib/fs.walk@1.2.8': dependencies: '@nodelib/fs.scandir': 2.1.5 - fastq: 1.17.1 + fastq: 1.19.1 - '@textlint/ast-node-types@12.6.1': {} + '@sindresorhus/merge-streams@2.3.0': {} - '@textlint/markdown-to-ast@12.6.1': + '@types/debug@4.1.12': dependencies: - '@textlint/ast-node-types': 12.6.1 - debug: 4.3.7 - mdast-util-gfm-autolink-literal: 0.1.3 - remark-footnotes: 3.0.0 - remark-frontmatter: 3.0.0 - remark-gfm: 1.0.0 - remark-parse: 9.0.0 - traverse: 0.6.10 - unified: 9.2.2 - transitivePeerDependencies: - - supports-color + '@types/ms': 2.1.0 - '@types/mdast@3.0.15': - dependencies: - '@types/unist': 2.0.11 + '@types/katex@0.16.7': {} - '@types/unist@2.0.11': {} + '@types/ms@2.1.0': {} - anchor-markdown-header@0.6.0: - dependencies: - emoji-regex: 10.1.0 + '@types/unist@2.0.11': {} argparse@2.0.1: {} - array-buffer-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - is-array-buffer: 3.0.4 - array-timsort@1.0.3: {} - array-union@3.0.1: {} - - arraybuffer.prototype.slice@1.0.3: - dependencies: - array-buffer-byte-length: 1.0.1 - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.5 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - is-array-buffer: 3.0.4 - is-shared-array-buffer: 1.0.3 - - 
available-typed-arrays@1.0.7: - dependencies: - possible-typed-array-names: 1.0.0 - - bail@1.0.5: {} - braces@3.0.3: dependencies: fill-range: 7.1.1 - call-bind@1.0.7: - dependencies: - es-define-property: 1.0.0 - es-errors: 1.3.0 - function-bind: 1.1.2 - get-intrinsic: 1.2.4 - set-function-length: 1.2.2 - callsites@3.1.0: {} - ccount@1.1.0: {} - chalk-template@1.1.0: dependencies: - chalk: 5.3.0 + chalk: 5.4.1 - chalk@5.3.0: {} + chalk@5.4.1: {} - character-entities-legacy@1.1.4: {} + character-entities-legacy@3.0.0: {} - character-entities@1.2.4: {} + character-entities@2.0.2: {} - character-reference-invalid@1.1.4: {} + character-reference-invalid@2.0.1: {} clear-module@4.1.2: dependencies: parent-module: 2.0.0 resolve-from: 5.0.0 - commander@12.1.0: {} + commander@13.1.0: {} + + commander@8.3.0: {} comment-json@4.2.5: dependencies: @@ -1393,270 +978,131 @@ snapshots: core-util-is@1.0.3: {} - cspell-config-lib@8.16.0: + cspell-config-lib@8.18.0: dependencies: - '@cspell/cspell-types': 8.16.0 + '@cspell/cspell-types': 8.18.0 comment-json: 4.2.5 - yaml: 2.6.1 + yaml: 2.7.0 - cspell-dictionary@8.16.0: + cspell-dictionary@8.18.0: dependencies: - '@cspell/cspell-pipe': 8.16.0 - '@cspell/cspell-types': 8.16.0 - cspell-trie-lib: 8.16.0 - fast-equals: 5.0.1 + '@cspell/cspell-pipe': 8.18.0 + '@cspell/cspell-types': 8.18.0 + cspell-trie-lib: 8.18.0 + fast-equals: 5.2.2 - cspell-gitignore@8.16.0: + cspell-gitignore@8.18.0: dependencies: - '@cspell/url': 8.16.0 - cspell-glob: 8.16.0 - cspell-io: 8.16.0 - find-up-simple: 1.0.0 + '@cspell/url': 8.18.0 + cspell-glob: 8.18.0 + cspell-io: 8.18.0 - cspell-glob@8.16.0: + cspell-glob@8.18.0: dependencies: - '@cspell/url': 8.16.0 + '@cspell/url': 8.18.0 micromatch: 4.0.8 - cspell-grammar@8.16.0: + cspell-grammar@8.18.0: dependencies: - '@cspell/cspell-pipe': 8.16.0 - '@cspell/cspell-types': 8.16.0 + '@cspell/cspell-pipe': 8.18.0 + '@cspell/cspell-types': 8.18.0 - cspell-io@8.16.0: + cspell-io@8.18.0: dependencies: - 
'@cspell/cspell-service-bus': 8.16.0 - '@cspell/url': 8.16.0 + '@cspell/cspell-service-bus': 8.18.0 + '@cspell/url': 8.18.0 - cspell-lib@8.16.0: + cspell-lib@8.18.0: dependencies: - '@cspell/cspell-bundled-dicts': 8.16.0 - '@cspell/cspell-pipe': 8.16.0 - '@cspell/cspell-resolver': 8.16.0 - '@cspell/cspell-types': 8.16.0 - '@cspell/dynamic-import': 8.16.0 - '@cspell/filetypes': 8.16.0 - '@cspell/strong-weak-map': 8.16.0 - '@cspell/url': 8.16.0 + '@cspell/cspell-bundled-dicts': 8.18.0 + '@cspell/cspell-pipe': 8.18.0 + '@cspell/cspell-resolver': 8.18.0 + '@cspell/cspell-types': 8.18.0 + '@cspell/dynamic-import': 8.18.0 + '@cspell/filetypes': 8.18.0 + '@cspell/strong-weak-map': 8.18.0 + '@cspell/url': 8.18.0 clear-module: 4.1.2 comment-json: 4.2.5 - cspell-config-lib: 8.16.0 - cspell-dictionary: 8.16.0 - cspell-glob: 8.16.0 - cspell-grammar: 8.16.0 - cspell-io: 8.16.0 - cspell-trie-lib: 8.16.0 + cspell-config-lib: 8.18.0 + cspell-dictionary: 8.18.0 + cspell-glob: 8.18.0 + cspell-grammar: 8.18.0 + cspell-io: 8.18.0 + cspell-trie-lib: 8.18.0 env-paths: 3.0.0 - fast-equals: 5.0.1 + fast-equals: 5.2.2 gensequence: 7.0.0 - import-fresh: 3.3.0 + import-fresh: 3.3.1 resolve-from: 5.0.0 vscode-languageserver-textdocument: 1.0.12 - vscode-uri: 3.0.8 + vscode-uri: 3.1.0 xdg-basedir: 5.1.0 - cspell-trie-lib@8.16.0: + cspell-trie-lib@8.18.0: dependencies: - '@cspell/cspell-pipe': 8.16.0 - '@cspell/cspell-types': 8.16.0 + '@cspell/cspell-pipe': 8.18.0 + '@cspell/cspell-types': 8.18.0 gensequence: 7.0.0 - cspell@8.16.0: + cspell@8.18.0: dependencies: - '@cspell/cspell-json-reporter': 8.16.0 - '@cspell/cspell-pipe': 8.16.0 - '@cspell/cspell-types': 8.16.0 - '@cspell/dynamic-import': 8.16.0 - '@cspell/url': 8.16.0 - chalk: 5.3.0 + '@cspell/cspell-json-reporter': 8.18.0 + '@cspell/cspell-pipe': 8.18.0 + '@cspell/cspell-types': 8.18.0 + '@cspell/dynamic-import': 8.18.0 + '@cspell/url': 8.18.0 + chalk: 5.4.1 chalk-template: 1.1.0 - commander: 12.1.0 - cspell-dictionary: 8.16.0 - 
cspell-gitignore: 8.16.0 - cspell-glob: 8.16.0 - cspell-io: 8.16.0 - cspell-lib: 8.16.0 + commander: 13.1.0 + cspell-dictionary: 8.18.0 + cspell-gitignore: 8.18.0 + cspell-glob: 8.18.0 + cspell-io: 8.18.0 + cspell-lib: 8.18.0 fast-json-stable-stringify: 2.1.0 file-entry-cache: 9.1.0 get-stdin: 9.0.0 - semver: 7.6.3 - tinyglobby: 0.2.10 - - data-view-buffer@1.0.1: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 - - data-view-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 - - data-view-byte-offset@1.0.0: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 + semver: 7.7.1 + tinyglobby: 0.2.12 - debug@4.3.7: + debug@4.4.0: dependencies: ms: 2.1.3 - define-data-property@1.1.4: - dependencies: - es-define-property: 1.0.0 - es-errors: 1.3.0 - gopd: 1.0.1 - - define-properties@1.2.1: - dependencies: - define-data-property: 1.1.4 - has-property-descriptors: 1.0.2 - object-keys: 1.1.1 - - dir-glob@3.0.1: - dependencies: - path-type: 4.0.0 - - doctoc@2.2.1: - dependencies: - '@textlint/markdown-to-ast': 12.6.1 - anchor-markdown-header: 0.6.0 - htmlparser2: 7.2.0 - minimist: 1.2.8 - underscore: 1.13.7 - update-section: 0.3.3 - transitivePeerDependencies: - - supports-color - - dom-serializer@1.4.1: + decode-named-character-reference@1.1.0: dependencies: - domelementtype: 2.3.0 - domhandler: 4.3.1 - entities: 2.2.0 - - domelementtype@2.3.0: {} + character-entities: 2.0.2 - domhandler@4.3.1: - dependencies: - domelementtype: 2.3.0 + dequal@2.0.3: {} - domutils@2.8.0: + devlop@1.1.0: dependencies: - dom-serializer: 1.4.1 - domelementtype: 2.3.0 - domhandler: 4.3.1 - - emoji-regex@10.1.0: {} - - entities@2.1.0: {} - - entities@2.2.0: {} + dequal: 2.0.3 - entities@3.0.1: {} + entities@4.5.0: {} env-paths@3.0.0: {} - es-abstract@1.23.5: - dependencies: - array-buffer-byte-length: 1.0.1 - arraybuffer.prototype.slice: 1.0.3 - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - 
data-view-buffer: 1.0.1 - data-view-byte-length: 1.0.1 - data-view-byte-offset: 1.0.0 - es-define-property: 1.0.0 - es-errors: 1.3.0 - es-object-atoms: 1.0.0 - es-set-tostringtag: 2.0.3 - es-to-primitive: 1.3.0 - function.prototype.name: 1.1.6 - get-intrinsic: 1.2.4 - get-symbol-description: 1.0.2 - globalthis: 1.0.4 - gopd: 1.0.1 - has-property-descriptors: 1.0.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 - hasown: 2.0.2 - internal-slot: 1.0.7 - is-array-buffer: 3.0.4 - is-callable: 1.2.7 - is-data-view: 1.0.1 - is-negative-zero: 2.0.3 - is-regex: 1.1.4 - is-shared-array-buffer: 1.0.3 - is-string: 1.0.7 - is-typed-array: 1.1.13 - is-weakref: 1.0.2 - object-inspect: 1.13.3 - object-keys: 1.1.1 - object.assign: 4.1.5 - regexp.prototype.flags: 1.5.3 - safe-array-concat: 1.1.2 - safe-regex-test: 1.0.3 - string.prototype.trim: 1.2.9 - string.prototype.trimend: 1.0.8 - string.prototype.trimstart: 1.0.8 - typed-array-buffer: 1.0.2 - typed-array-byte-length: 1.0.1 - typed-array-byte-offset: 1.0.3 - typed-array-length: 1.0.7 - unbox-primitive: 1.0.2 - which-typed-array: 1.1.15 - - es-define-property@1.0.0: - dependencies: - get-intrinsic: 1.2.4 - - es-errors@1.3.0: {} - - es-object-atoms@1.0.0: - dependencies: - es-errors: 1.3.0 - - es-set-tostringtag@2.0.3: - dependencies: - get-intrinsic: 1.2.4 - has-tostringtag: 1.0.2 - hasown: 2.0.2 - - es-to-primitive@1.3.0: - dependencies: - is-callable: 1.2.7 - is-date-object: 1.0.5 - is-symbol: 1.0.4 - - escape-string-regexp@4.0.0: {} - esprima@4.0.1: {} - extend@3.0.2: {} - - fast-equals@5.0.1: {} + fast-equals@5.2.2: {} - fast-glob@3.3.2: + fast-glob@3.3.3: dependencies: '@nodelib/fs.stat': 2.0.5 '@nodelib/fs.walk': 1.2.8 glob-parent: 5.1.2 merge2: 1.4.1 - micromatch: 4.0.4 + micromatch: 4.0.8 fast-json-stable-stringify@2.1.0: {} - fastq@1.17.1: + fastq@1.19.1: dependencies: - reusify: 1.0.4 + reusify: 1.1.0 - fault@1.0.4: - dependencies: - format: 0.2.2 - - fdir@6.4.2(picomatch@4.0.2): + fdir@6.4.3(picomatch@4.0.2): 
optionalDependencies: picomatch: 4.0.2 @@ -1668,50 +1114,17 @@ snapshots: dependencies: to-regex-range: 5.0.1 - find-up-simple@1.0.0: {} - flat-cache@5.0.0: dependencies: - flatted: 3.3.2 + flatted: 3.3.3 keyv: 4.5.4 - flatted@3.3.2: {} - - for-each@0.3.3: - dependencies: - is-callable: 1.2.7 - - format@0.2.2: {} - - function-bind@1.1.2: {} - - function.prototype.name@1.1.6: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.5 - functions-have-names: 1.2.3 - - functions-have-names@1.2.3: {} + flatted@3.3.3: {} gensequence@7.0.0: {} - get-intrinsic@1.2.4: - dependencies: - es-errors: 1.3.0 - function-bind: 1.1.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 - hasown: 2.0.2 - get-stdin@9.0.0: {} - get-symbol-description@1.0.2: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - glob-parent@5.1.2: dependencies: is-glob: 4.0.3 @@ -1720,54 +1133,20 @@ snapshots: dependencies: ini: 4.1.1 - globalthis@1.0.4: - dependencies: - define-properties: 1.2.1 - gopd: 1.0.1 - - globby@12.1.0: + globby@14.0.2: dependencies: - array-union: 3.0.1 - dir-glob: 3.0.1 - fast-glob: 3.3.2 + '@sindresorhus/merge-streams': 2.3.0 + fast-glob: 3.3.3 ignore: 5.3.2 - merge2: 1.4.1 - slash: 4.0.0 - - gopd@1.0.1: - dependencies: - get-intrinsic: 1.2.4 - - has-bigints@1.0.2: {} + path-type: 5.0.0 + slash: 5.1.0 + unicorn-magic: 0.1.0 has-own-prop@2.0.0: {} - has-property-descriptors@1.0.2: - dependencies: - es-define-property: 1.0.0 - - has-proto@1.0.3: {} - - has-symbols@1.0.3: {} - - has-tostringtag@1.0.2: - dependencies: - has-symbols: 1.0.3 - - hasown@2.0.2: - dependencies: - function-bind: 1.1.2 - - htmlparser2@7.2.0: - dependencies: - domelementtype: 2.3.0 - domhandler: 4.3.1 - domutils: 2.8.0 - entities: 3.0.1 - ignore@5.3.2: {} - import-fresh@3.3.0: + import-fresh@3.3.1: dependencies: parent-module: 1.0.1 resolve-from: 4.0.0 @@ -1776,311 +1155,266 @@ snapshots: ini@4.1.1: {} - internal-slot@1.0.7: - dependencies: - es-errors: 1.3.0 - 
hasown: 2.0.2 - side-channel: 1.0.6 - - is-alphabetical@1.0.4: {} - - is-alphanumerical@1.0.4: - dependencies: - is-alphabetical: 1.0.4 - is-decimal: 1.0.4 - - is-array-buffer@3.0.4: - dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 - - is-async-function@2.0.0: - dependencies: - has-tostringtag: 1.0.2 - - is-bigint@1.0.4: - dependencies: - has-bigints: 1.0.2 - - is-boolean-object@1.1.2: - dependencies: - call-bind: 1.0.7 - has-tostringtag: 1.0.2 - - is-buffer@2.0.5: {} - - is-callable@1.2.7: {} - - is-data-view@1.0.1: - dependencies: - is-typed-array: 1.1.13 + is-alphabetical@2.0.1: {} - is-date-object@1.0.5: + is-alphanumerical@2.0.1: dependencies: - has-tostringtag: 1.0.2 + is-alphabetical: 2.0.1 + is-decimal: 2.0.1 - is-decimal@1.0.4: {} + is-decimal@2.0.1: {} is-extglob@2.1.1: {} - is-finalizationregistry@1.1.0: - dependencies: - call-bind: 1.0.7 - - is-generator-function@1.0.10: - dependencies: - has-tostringtag: 1.0.2 - is-glob@4.0.3: dependencies: is-extglob: 2.1.1 - is-hexadecimal@1.0.4: {} - - is-map@2.0.3: {} - - is-negative-zero@2.0.3: {} - - is-number-object@1.0.7: - dependencies: - has-tostringtag: 1.0.2 + is-hexadecimal@2.0.1: {} is-number@7.0.0: {} - is-plain-obj@2.1.0: {} - - is-regex@1.1.4: + js-yaml@4.1.0: dependencies: - call-bind: 1.0.7 - has-tostringtag: 1.0.2 + argparse: 2.0.1 - is-set@2.0.3: {} + json-buffer@3.0.1: {} - is-shared-array-buffer@1.0.3: - dependencies: - call-bind: 1.0.7 + jsonc-parser@3.3.1: {} - is-string@1.0.7: + katex@0.16.21: dependencies: - has-tostringtag: 1.0.2 + commander: 8.3.0 - is-symbol@1.0.4: + keyv@4.5.4: dependencies: - has-symbols: 1.0.3 + json-buffer: 3.0.1 - is-typed-array@1.1.13: + linkify-it@5.0.0: dependencies: - which-typed-array: 1.1.15 + uc.micro: 2.1.0 - is-weakmap@2.0.2: {} + markdown-it@14.1.0: + dependencies: + argparse: 2.0.1 + entities: 4.5.0 + linkify-it: 5.0.0 + mdurl: 2.0.0 + punycode.js: 2.3.1 + uc.micro: 2.1.0 - is-weakref@1.0.2: + 
markdownlint-cli2-formatter-default@0.0.5(markdownlint-cli2@0.17.2): dependencies: - call-bind: 1.0.7 + markdownlint-cli2: 0.17.2 - is-weakset@2.0.3: + markdownlint-cli2@0.17.2: dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 + globby: 14.0.2 + js-yaml: 4.1.0 + jsonc-parser: 3.3.1 + markdownlint: 0.37.4 + markdownlint-cli2-formatter-default: 0.0.5(markdownlint-cli2@0.17.2) + micromatch: 4.0.8 + transitivePeerDependencies: + - supports-color - isarray@2.0.5: {} + markdownlint@0.37.4: + dependencies: + markdown-it: 14.1.0 + micromark: 4.0.1 + micromark-core-commonmark: 2.0.2 + micromark-extension-directive: 3.0.2 + micromark-extension-gfm-autolink-literal: 2.1.0 + micromark-extension-gfm-footnote: 2.1.0 + micromark-extension-gfm-table: 2.1.0 + micromark-extension-math: 3.1.0 + micromark-util-types: 2.0.1 + transitivePeerDependencies: + - supports-color - json-buffer@3.0.1: {} + mdurl@2.0.0: {} - keyv@4.5.4: - dependencies: - json-buffer: 3.0.1 + merge2@1.4.1: {} - linkify-it@3.0.3: + micromark-core-commonmark@2.0.2: dependencies: - uc.micro: 1.0.6 - - longest-streak@2.0.4: {} + decode-named-character-reference: 1.1.0 + devlop: 1.1.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - markdown-it@12.3.2: + micromark-extension-directive@3.0.2: dependencies: - argparse: 2.0.1 - entities: 2.1.0 - linkify-it: 3.0.3 - mdurl: 1.0.1 - uc.micro: 1.0.6 + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 
2.0.1 + parse-entities: 4.0.2 - markdown-table@2.0.0: + micromark-extension-gfm-autolink-literal@2.1.0: dependencies: - repeat-string: 1.6.1 + micromark-util-character: 2.1.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - markdownlint-cli2-formatter-default@0.0.3(markdownlint-cli2@0.4.0): + micromark-extension-gfm-footnote@2.1.0: dependencies: - markdownlint-cli2: 0.4.0 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - markdownlint-cli2@0.4.0: + micromark-extension-gfm-table@2.1.0: dependencies: - globby: 12.1.0 - markdownlint: 0.25.1 - markdownlint-cli2-formatter-default: 0.0.3(markdownlint-cli2@0.4.0) - markdownlint-rule-helpers: 0.16.0 - micromatch: 4.0.4 - strip-json-comments: 4.0.0 - yaml: 1.10.2 - - markdownlint-rule-helpers@0.16.0: {} + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - markdownlint@0.25.1: + micromark-extension-math@3.1.0: dependencies: - markdown-it: 12.3.2 + '@types/katex': 0.16.7 + devlop: 1.1.0 + katex: 0.16.21 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-find-and-replace@1.1.1: + micromark-factory-destination@2.0.1: dependencies: - escape-string-regexp: 4.0.0 - unist-util-is: 4.1.0 - unist-util-visit-parents: 3.1.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-footnote@0.1.7: + micromark-factory-label@2.0.1: dependencies: - mdast-util-to-markdown: 0.6.5 - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + devlop: 1.1.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 
2.0.1 - mdast-util-from-markdown@0.8.5: + micromark-factory-space@2.0.1: dependencies: - '@types/mdast': 3.0.15 - mdast-util-to-string: 2.0.0 - micromark: 2.11.4 - parse-entities: 2.0.0 - unist-util-stringify-position: 2.0.3 - transitivePeerDependencies: - - supports-color + micromark-util-character: 2.1.1 + micromark-util-types: 2.0.1 - mdast-util-frontmatter@0.2.0: + micromark-factory-title@2.0.1: dependencies: - micromark-extension-frontmatter: 0.2.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-gfm-autolink-literal@0.1.3: + micromark-factory-whitespace@2.0.1: dependencies: - ccount: 1.1.0 - mdast-util-find-and-replace: 1.1.1 - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-gfm-strikethrough@0.2.3: + micromark-util-character@2.1.1: dependencies: - mdast-util-to-markdown: 0.6.5 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-gfm-table@0.1.6: + micromark-util-chunked@2.0.1: dependencies: - markdown-table: 2.0.0 - mdast-util-to-markdown: 0.6.5 + micromark-util-symbol: 2.0.1 - mdast-util-gfm-task-list-item@0.1.6: + micromark-util-classify-character@2.0.1: dependencies: - mdast-util-to-markdown: 0.6.5 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-gfm@0.1.2: + micromark-util-combine-extensions@2.0.1: dependencies: - mdast-util-gfm-autolink-literal: 0.1.3 - mdast-util-gfm-strikethrough: 0.2.3 - mdast-util-gfm-table: 0.1.6 - mdast-util-gfm-task-list-item: 0.1.6 - mdast-util-to-markdown: 0.6.5 - transitivePeerDependencies: - - supports-color + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.1 - mdast-util-to-markdown@0.6.5: + micromark-util-decode-numeric-character-reference@2.0.2: dependencies: - '@types/unist': 2.0.11 - 
longest-streak: 2.0.4 - mdast-util-to-string: 2.0.0 - parse-entities: 2.0.0 - repeat-string: 1.6.1 - zwitch: 1.0.5 + micromark-util-symbol: 2.0.1 - mdast-util-to-string@2.0.0: {} + micromark-util-encode@2.0.1: {} - mdurl@1.0.1: {} + micromark-util-html-tag-name@2.0.1: {} - merge2@1.4.1: {} - - micromark-extension-footnote@0.3.2: + micromark-util-normalize-identifier@2.0.1: dependencies: - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + micromark-util-symbol: 2.0.1 - micromark-extension-frontmatter@0.2.2: + micromark-util-resolve-all@2.0.1: dependencies: - fault: 1.0.4 + micromark-util-types: 2.0.1 - micromark-extension-gfm-autolink-literal@0.5.7: + micromark-util-sanitize-uri@2.0.1: dependencies: - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 - micromark-extension-gfm-strikethrough@0.6.5: + micromark-util-subtokenize@2.1.0: dependencies: - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 - micromark-extension-gfm-table@0.4.3: - dependencies: - micromark: 2.11.4 - transitivePeerDependencies: - - supports-color + micromark-util-symbol@2.0.1: {} - micromark-extension-gfm-tagfilter@0.3.0: {} + micromark-util-types@2.0.1: {} - micromark-extension-gfm-task-list-item@0.3.3: + micromark@4.0.1: dependencies: - micromark: 2.11.4 + '@types/debug': 4.1.12 + debug: 4.4.0 + decode-named-character-reference: 1.1.0 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + 
micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 transitivePeerDependencies: - supports-color - micromark-extension-gfm@0.3.3: - dependencies: - micromark: 2.11.4 - micromark-extension-gfm-autolink-literal: 0.5.7 - micromark-extension-gfm-strikethrough: 0.6.5 - micromark-extension-gfm-table: 0.4.3 - micromark-extension-gfm-tagfilter: 0.3.0 - micromark-extension-gfm-task-list-item: 0.3.3 - transitivePeerDependencies: - - supports-color - - micromark@2.11.4: - dependencies: - debug: 4.3.7 - parse-entities: 2.0.0 - transitivePeerDependencies: - - supports-color - - micromatch@4.0.4: - dependencies: - braces: 3.0.3 - picomatch: 2.3.1 - micromatch@4.0.8: dependencies: braces: 3.0.3 picomatch: 2.3.1 - minimist@1.2.8: {} - ms@2.1.3: {} - object-inspect@1.13.3: {} - - object-keys@1.1.1: {} - - object.assign@4.1.5: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - has-symbols: 1.0.3 - object-keys: 1.1.1 - parent-module@1.0.1: dependencies: callsites: 3.1.0 @@ -2089,292 +1423,59 @@ snapshots: dependencies: callsites: 3.1.0 - parse-entities@2.0.0: + parse-entities@4.0.2: dependencies: - character-entities: 1.2.4 - character-entities-legacy: 1.1.4 - character-reference-invalid: 1.1.4 - is-alphanumerical: 1.0.4 - is-decimal: 1.0.4 - is-hexadecimal: 1.0.4 + '@types/unist': 2.0.11 + character-entities-legacy: 3.0.0 + character-reference-invalid: 2.0.1 + decode-named-character-reference: 1.1.0 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + is-hexadecimal: 2.0.1 - path-type@4.0.0: {} + path-type@5.0.0: {} picomatch@2.3.1: {} picomatch@4.0.2: {} - possible-typed-array-names@1.0.0: {} + punycode.js@2.3.1: {} queue-microtask@1.2.3: {} - reflect.getprototypeof@1.0.7: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.5 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - gopd: 1.0.1 - which-builtin-type: 1.2.0 - - regexp.prototype.flags@1.5.3: - dependencies: - call-bind: 1.0.7 - define-properties: 
1.2.1 - es-errors: 1.3.0 - set-function-name: 2.0.2 - - remark-footnotes@3.0.0: - dependencies: - mdast-util-footnote: 0.1.7 - micromark-extension-footnote: 0.3.2 - transitivePeerDependencies: - - supports-color - - remark-frontmatter@3.0.0: - dependencies: - mdast-util-frontmatter: 0.2.0 - micromark-extension-frontmatter: 0.2.2 - - remark-gfm@1.0.0: - dependencies: - mdast-util-gfm: 0.1.2 - micromark-extension-gfm: 0.3.3 - transitivePeerDependencies: - - supports-color - - remark-parse@9.0.0: - dependencies: - mdast-util-from-markdown: 0.8.5 - transitivePeerDependencies: - - supports-color - repeat-string@1.6.1: {} resolve-from@4.0.0: {} resolve-from@5.0.0: {} - reusify@1.0.4: {} + reusify@1.1.0: {} run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 - safe-array-concat@1.1.2: - dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 - has-symbols: 1.0.3 - isarray: 2.0.5 - - safe-regex-test@1.0.3: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-regex: 1.1.4 - - semver@7.6.3: {} - - set-function-length@1.2.2: - dependencies: - define-data-property: 1.1.4 - es-errors: 1.3.0 - function-bind: 1.1.2 - get-intrinsic: 1.2.4 - gopd: 1.0.1 - has-property-descriptors: 1.0.2 - - set-function-name@2.0.2: - dependencies: - define-data-property: 1.1.4 - es-errors: 1.3.0 - functions-have-names: 1.2.3 - has-property-descriptors: 1.0.2 + semver@7.7.1: {} - side-channel@1.0.6: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - object-inspect: 1.13.3 - - slash@4.0.0: {} + slash@5.1.0: {} - string.prototype.trim@1.2.9: + tinyglobby@0.2.12: dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.5 - es-object-atoms: 1.0.0 - - string.prototype.trimend@1.0.8: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-object-atoms: 1.0.0 - - string.prototype.trimstart@1.0.8: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-object-atoms: 1.0.0 - - strip-json-comments@4.0.0: {} - - 
tinyglobby@0.2.10: - dependencies: - fdir: 6.4.2(picomatch@4.0.2) + fdir: 6.4.3(picomatch@4.0.2) picomatch: 4.0.2 to-regex-range@5.0.1: dependencies: is-number: 7.0.0 - traverse@0.6.10: - dependencies: - gopd: 1.0.1 - typedarray.prototype.slice: 1.0.3 - which-typed-array: 1.1.15 - - trough@1.0.5: {} - - typed-array-buffer@1.0.2: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-typed-array: 1.1.13 - - typed-array-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 - is-typed-array: 1.1.13 - - typed-array-byte-offset@1.0.3: - dependencies: - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 - is-typed-array: 1.1.13 - reflect.getprototypeof: 1.0.7 - - typed-array-length@1.0.7: - dependencies: - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - is-typed-array: 1.1.13 - possible-typed-array-names: 1.0.0 - reflect.getprototypeof: 1.0.7 - - typedarray.prototype.slice@1.0.3: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.5 - es-errors: 1.3.0 - typed-array-buffer: 1.0.2 - typed-array-byte-offset: 1.0.3 - - uc.micro@1.0.6: {} - - unbox-primitive@1.0.2: - dependencies: - call-bind: 1.0.7 - has-bigints: 1.0.2 - has-symbols: 1.0.3 - which-boxed-primitive: 1.0.2 - - underscore@1.13.7: {} - - unified@9.2.2: - dependencies: - '@types/unist': 2.0.11 - bail: 1.0.5 - extend: 3.0.2 - is-buffer: 2.0.5 - is-plain-obj: 2.1.0 - trough: 1.0.5 - vfile: 4.2.1 - - unist-util-is@4.1.0: {} - - unist-util-stringify-position@2.0.3: - dependencies: - '@types/unist': 2.0.11 - - unist-util-visit-parents@3.1.1: - dependencies: - '@types/unist': 2.0.11 - unist-util-is: 4.1.0 - - update-section@0.3.3: {} - - vfile-message@2.0.4: - dependencies: - '@types/unist': 2.0.11 - unist-util-stringify-position: 2.0.3 + uc.micro@2.1.0: {} - vfile@4.2.1: - dependencies: - '@types/unist': 2.0.11 - is-buffer: 2.0.5 - unist-util-stringify-position: 2.0.3 - vfile-message: 
2.0.4 + unicorn-magic@0.1.0: {} vscode-languageserver-textdocument@1.0.12: {} - vscode-uri@3.0.8: {} - - which-boxed-primitive@1.0.2: - dependencies: - is-bigint: 1.0.4 - is-boolean-object: 1.1.2 - is-number-object: 1.0.7 - is-string: 1.0.7 - is-symbol: 1.0.4 - - which-builtin-type@1.2.0: - dependencies: - call-bind: 1.0.7 - function.prototype.name: 1.1.6 - has-tostringtag: 1.0.2 - is-async-function: 2.0.0 - is-date-object: 1.0.5 - is-finalizationregistry: 1.1.0 - is-generator-function: 1.0.10 - is-regex: 1.1.4 - is-weakref: 1.0.2 - isarray: 2.0.5 - which-boxed-primitive: 1.0.2 - which-collection: 1.0.2 - which-typed-array: 1.1.15 - - which-collection@1.0.2: - dependencies: - is-map: 2.0.3 - is-set: 2.0.3 - is-weakmap: 2.0.2 - is-weakset: 2.0.3 - - which-typed-array@1.1.15: - dependencies: - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-tostringtag: 1.0.2 + vscode-uri@3.1.0: {} xdg-basedir@5.1.0: {} - yaml@1.10.2: {} - - yaml@2.6.1: {} - - zwitch@1.0.5: {} + yaml@2.7.0: {} diff --git a/book/src/advanced-usage/new-extension.md b/book/src/advanced-usage/new-extension.md index ecbd7dd134..78a1d04b3b 100644 --- a/book/src/advanced-usage/new-extension.md +++ b/book/src/advanced-usage/new-extension.md @@ -10,6 +10,7 @@ Extensions in OpenVM let you introduce additional functionality without disrupti This modular architecture means the extension cleanly adds new capabilities while leaving the rest of OpenVM untouched. The entire system, including the extension’s operations, can still be proven correct. Conceptually, a new extension consists of three parts: + - **Guest**: High-level Rust code that defines and uses the new operations. - **Transpiler**: Logic that converts custom RISC-V instructions into corresponding OpenVM instructions. - **Circuit**: The special chips that enforce correctness of instruction execution through polynomial constraints. 
@@ -37,5 +38,4 @@ The circuit component is where the extension’s logic is enforced in a zero-kno - Implements the computing logic, so that the output always corresponds to the correct result of the new operation. The chip has access to the memory shared with the other chips from the VM via [our special architecture](https://github.com/openvm-org/openvm/blob/main/docs/specs/ISA.md). - Properly constrains all the inputs, outputs and intermediate variables using polynomial equations in such a way that there is no way to fill these variables with values that correspond to an incorrect output while fitting the constraints. - For more technical details on writing circuits and constraints, consult the OpenVM [contributor documentation](https://github.com/openvm-org/openvm/blob/main/docs/specs/README.md), which provides specifications and guidelines for integrating your extension into the OpenVM framework. diff --git a/book/src/advanced-usage/sdk.md b/book/src/advanced-usage/sdk.md index 20e4b52956..16a1d9c6e5 100644 --- a/book/src/advanced-usage/sdk.md +++ b/book/src/advanced-usage/sdk.md @@ -34,7 +34,6 @@ The `SdkVmConfig` struct allows you to specify the extensions and system configu > ℹ️ > When using Rust to write the guest program, the VM system configuration should keep the default value `pointer_max_bits = 29` to match the hardcoded memory limit of the memory allocator. Otherwise, the guest program may fail due to out of bounds memory access in the VM. - ## Running a Program To run your program and see the public value output, you can do the following: @@ -81,7 +80,7 @@ After generating a proof, you can verify it. To do so, you need your verifying k ### Setup -To generate an EVM proof, you'll first need to ensure that you have followed the [CLI installation steps](../../getting-started/install.md). get the appropriate KZG params by running the following command. 
+To generate an EVM proof, you'll first need to ensure that you have followed the [CLI installation steps](../getting-started/install.md). Then get the appropriate KZG params by running the following command. ```bash cargo openvm setup ``` @@ -125,4 +124,4 @@ You can now run the aggregation keygen, proof, and verification functions for th > ⚠️ **WARNING** > The aggregation proving key `agg_pk` above is large. Avoid cloning it if possible. -Note that `DEFAULT_PARAMS_DIR` is the directory where Halo2 parameters are stored by the `cargo openvm setup` CLI command. For more information on the setup process, see the `EVM Level` section of the [verify](../../writing-apps/verify.md) doc. +Note that `DEFAULT_PARAMS_DIR` is the directory where Halo2 parameters are stored by the `cargo openvm setup` CLI command. For more information on the setup process, see the `EVM Level` section of the [verify](../writing-apps/verify.md) doc. diff --git a/book/src/custom-extensions/ecc.md b/book/src/custom-extensions/ecc.md index 26c722fa33..331430d6ad 100644 --- a/book/src/custom-extensions/ecc.md +++ b/book/src/custom-extensions/ecc.md @@ -111,4 +111,4 @@ b = "7" The `supported_modulus` parameter is a list of moduli that the guest program will use. As mentioned in the [algebra extension](./algebra.md) chapter, the order of moduli in `[app_vm_config.modular]` must match the order in the `moduli_init!` macro. -The `ecc.supported_curves` parameter is a list of supported curves that the guest program will use. They must be provided in decimal format in the `.toml` file. For multiple curves create multiple `[[app_vm_config.ecc.supported_curves]]` sections. The order of curves in `[[app_vm_config.ecc.supported_curves]]` must match the order in the `sw_init!` macro. \ No newline at end of file +The `ecc.supported_curves` parameter is a list of supported curves that the guest program will use. They must be provided in decimal format in the `.toml` file. 
For multiple curves create multiple `[[app_vm_config.ecc.supported_curves]]` sections. The order of curves in `[[app_vm_config.ecc.supported_curves]]` must match the order in the `sw_init!` macro. diff --git a/book/src/getting-started/install.md b/book/src/getting-started/install.md index 11e74cd96f..2405542d65 100644 --- a/book/src/getting-started/install.md +++ b/book/src/getting-started/install.md @@ -9,7 +9,7 @@ To use OpenVM for generating proofs, you must install the OpenVM command line to Begin the installation: ```bash -cargo install --locked --git http://github.com/openvm-org/openvm.git cargo-openvm +cargo install --locked --git http://github.com/openvm-org/openvm.git --tag v1.0.0 cargo-openvm ``` This will globally install `cargo-openvm`. You can validate a successful installation with: @@ -23,7 +23,7 @@ cargo openvm --version To build from source, clone the repository and begin the installation. ```bash -git clone https://github.com/openvm-org/openvm.git +git clone --branch v1.0.0 --single-branch https://github.com/openvm-org/openvm.git cd openvm cargo install --locked --force --path crates/cli ``` diff --git a/book/src/writing-apps/overview.md b/book/src/writing-apps/overview.md index 98db17b719..c3e5a79020 100644 --- a/book/src/writing-apps/overview.md +++ b/book/src/writing-apps/overview.md @@ -28,6 +28,7 @@ For more information on both commands, see the [build](./build.md) docs. The `--input` field needs to either be a single hex string or a file path to a json file that contains the key `input` and an array of hex strings. Note that if your hex string represents a single number, it should be written in little-endian format (as this is what the VM expects). Also note that if you need to provide multiple input streams, you have to use the file path option. 
Each hex string (either in the file or as the direct input) is either: + - Hex string of bytes, which is prefixed with 0x01 - Hex string of native field elements (represented as u32, little endian), prefixed with 0x02 diff --git a/book/src/writing-apps/verify.md b/book/src/writing-apps/verify.md index 9f123dd2c1..4a4f4b0dc3 100644 --- a/book/src/writing-apps/verify.md +++ b/book/src/writing-apps/verify.md @@ -15,10 +15,13 @@ If you omit `--app_vk` and/or `--proof`, the command will search for those files Once again, if you omitted `--output` and `--vk_output` in the `keygen` and `prove` commands, you can omit `--app_vk` and `--proof` in the `verify` command. ## EVM Level + EVM level proof setup requires large amounts of computation and memory (~200GB). It is recommended to run this process on a server. ### Install Solc + Install `solc` `0.8.19` using `svm` + ```bash # Install svm cargo install --version 0.5.7 svm-rs @@ -37,19 +40,23 @@ The workflow for generating an end-to-end EVM proof requires first generating an ```bash cargo openvm setup ``` + Note that `cargo openvm setup` may attempt to download other files (i.e. KZG parameters) from an AWS S3 bucket into `~/.openvm/`. This command can take ~20mins on a `m6a.16xlarge` instance due to the keygen time. Upon a successful run, the command will write the files + - `agg.pk` - `verifier.sol` - `verifier.bytecode.json` + to `~/.openvm/`, where `~` is the directory specified by environment variable `$HOME`. Every command that requires these files will look for them in this directory. The `agg.pk` contains all aggregation proving keys necessary for aggregating to a final EVM proof. The `verifier.sol` file contains a Solidity contract to verify the final EVM proof. The contract is named `Halo2Verifier` and proof verification is the fallback function of the contract. 
In addition, the command outputs a JSON file `verifier.bytecode.json` of the form + ```json { "sol_compiler_version": "0.8.19", @@ -57,6 +64,7 @@ In addition, the command outputs a JSON file `verifier.bytecode.json` of the for "bytecode": "0x..." } ``` + where `sol_compiler_version` is the Solidity compiler version used to compile the contract (currently fixed to `0.8.19`), `sol_compiler_options` are additional compiler options used, and `bytecode` is the compiled EVM bytecode as a hex string. @@ -67,7 +75,6 @@ where `sol_compiler_version` is the Solidity compiler version used to compile th > > This command requires very large amounts of computation and memory (~200 GB). - ## Generating and Verifying an EVM Proof To generate and verify an EVM proof, you need to run the following commands: @@ -92,7 +99,9 @@ The EVM proof is written to `evm.proof` as a JSON of the following format: "proof": "0x.." } ``` + where each field is a hex string. We explain what each field represents: + - `accumulators`: `12 * 32` bytes representing the KZG accumulator of the proof, where the proof is from a SNARK using the KZG commitment scheme. - `exe_commit`: `32` bytes for the commitment of the app executable. - `leaf_commit`: `32` bytes for the commitment of the executable verifying app VM proofs. @@ -104,6 +113,7 @@ where each field is a hex string. We explain what each field represents: The `cargo openvm verify evm` command reads the EVM proof from JSON file and then simulates the call to the verifier contract using [Revm](https://github.com/bluealloy/revm/tree/main). This function should only be used for testing and development purposes but not for production. To verify the EVM proof in an EVM execution environment, the EVM proof must be formatted into calldata bytes and sent to the fallback function of the verifier smart contract. The calldata bytes are formed by concatenating the fields of the EVM proof described above in the following order and format: + 1. 
`accumulators`: every `32` bytes are _reversed_ from little endian to big endian and concatenated. 2. `exe_commit`: the `32` bytes are _reversed_ from little endian to big endian. 3. `leaf_commit`: the `32` bytes are _reversed_ from little endian to big endian. diff --git a/book/src/writing-apps/write-program.md b/book/src/writing-apps/write-program.md index 517784473c..8f2fb7d9d9 100644 --- a/book/src/writing-apps/write-program.md +++ b/book/src/writing-apps/write-program.md @@ -7,7 +7,7 @@ See the example [fibonacci program](https://github.com/openvm-org/openvm-example The guest program should be a `no_std` Rust crate. As long as it is `no_std`, you can import any other `no_std` crates and write Rust as you normally would. Import the `openvm` library crate to use `openvm` intrinsic functions (for example `openvm::io::*`). -More examples of guest programs can be found in the [benchmarks/programs](https://github.com/openvm-org/openvm/tree/main/benchmarks/programs) directory. +More examples of guest programs can be found in the [benchmarks/guest](https://github.com/openvm-org/openvm/tree/main/benchmarks/guest) directory. ## Handling I/O @@ -56,6 +56,7 @@ Binary crates can generally be written using the standard library, although for OpenVM fully supports `no_std` Rust. We refer to the [Embedded Rust Book](https://docs.rust-embedded.org/book/intro/no-std.html) for a more detailed introduction to `no_std` Rust. ### `no_std` library crates + In a library crate, you should add the following to `lib.rs` to declare your crate as `no_std`: ```rust @@ -101,16 +102,20 @@ fn main() { ``` If you want to feature gate the usage of the standard library, you can add + ```toml [features] std = ["openvm/std"] ``` + to `Cargo.toml` as discussed above. In this case, the `main.rs` header should be modified to: + ```rust // main.rs #![cfg_attr(not(feature = "std"), no_main)] #![cfg_attr(not(feature = "std"), no_std)] ``` + and you still need the `openvm::entry!(main)` line. 
This tells Rust to use the custom `main` handler when the environment is `no_std`, but to use the Rust `std` library and the standard `main` handler when the feature `"std"` is enabled. diff --git a/book/words.txt b/book/words.txt index e69de29bb2..556a8af01a 100644 --- a/book/words.txt +++ b/book/words.txt @@ -0,0 +1,28 @@ +openvm +Revm +println +vmexe +zkvm +rustup +usize +mathbb +keccak +Keccak +transpiles +transpiling +Transpiles +secp +serde +eprintln +unvalidated +xlarge +noplayground +Repr +riscv +EVMMAX +prehash +prehashed +Uninit +noverify +repr +insn \ No newline at end of file diff --git a/ci/README.md b/ci/README.md index 0266cb6b78..d1ee972a18 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,5 +1,5 @@ ### Notes on benchmark config -- `name` must match the binary name in `benchmarks/`. It will be used to find the working directory. -- `id` must be unique within the config file. It will be used as (part of) the file name when uploading to S3: `${id}-${current_sha}.[md/json]` \ No newline at end of file +- `name` must match the binary name in `benchmarks/prove/`. It will be used to find the working directory. +- `id` must be unique within the config file. 
It will be used as (part of) the file name when uploading to S3: `${id}-${current_sha}.[md/json]` diff --git a/ci/benchmark-config.json b/ci/benchmark-config.json index fb2d67d1fa..f3bf55ddb7 100644 --- a/ci/benchmark-config.json +++ b/ci/benchmark-config.json @@ -3,7 +3,7 @@ { "name": "verify_fibair", "id": "verify_fibair", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": false, "run_params": [ { @@ -17,7 +17,7 @@ { "name": "fibonacci", "id": "fibonacci", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": false, "run_params": [ { @@ -31,7 +31,7 @@ { "name": "regex", "id": "regex", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": false, "run_params": [ { @@ -45,7 +45,7 @@ { "name": "ecrecover", "id": "ecrecover", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": false, "run_params": [ { @@ -59,7 +59,7 @@ { "name": "pairing", "id": "pairing", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": false, "run_params": [ { @@ -73,7 +73,7 @@ { "name": "fib_e2e", "id": "fib_e2e", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": true, "run_params": [ { @@ -90,7 +90,7 @@ { "name": "kitchen_sink", "id": "kitchen_sink", - "working_directory": "benchmarks", + "working_directory": "benchmarks/prove", "e2e_bench": true, "run_params": [ { diff --git a/crates/circuits/mod-builder/src/builder.rs b/crates/circuits/mod-builder/src/builder.rs index 641b99317e..6e1c22a009 100644 --- a/crates/circuits/mod-builder/src/builder.rs +++ b/crates/circuits/mod-builder/src/builder.rs @@ -78,12 +78,13 @@ pub struct ExprBuilder { /// flag for debug mode debug: bool, - /// Whether the builder has been finalized. Only after finalize, we can do generate_subrow and eval etc. + /// Whether the builder has been finalized. 
Only after finalize, we can do generate_subrow and + /// eval etc. finalized: bool, // Setup opcode is a special op that verifies the modulus is correct. - // There are some chips that don't need it because we hardcode the modulus. E.g. the pairing ones. - // For those chips need setup, setup is derived: setup = is_valid - sum(all_flags) + // There are some chips that don't need it because we hardcode the modulus. E.g. the pairing + // ones. For those chips need setup, setup is derived: setup = is_valid - sum(all_flags) // Therefore when the chip only supports one opcode, user won't explicitly create a flag for it // and we will create a default flag for it on finalizing. needs_setup: bool, @@ -185,7 +186,8 @@ impl ExprBuilder { // so there should be same number of calls to the new_var, add_constraint and add_compute. pub fn new_var(&mut self) -> (usize, SymbolicExpr) { self.num_variables += 1; - // Allocate space for the new variable, to make sure they are corresponding to the same variable index. + // Allocate space for the new variable, to make sure they are corresponding to the same + // variable index. 
self.constraints.push(SymbolicExpr::Input(0)); self.computes.push(SymbolicExpr::Input(0)); self.q_limbs.push(0); @@ -349,10 +351,9 @@ impl SubAir for FieldExpr { builder.assert_bool(is_setup.clone()); // TODO[jpw]: currently we enforce at the program code level that: // - a valid program must call the correct setup opcodes to be correct - // - it would be better if we can constraint this in the circuit, - // however this has the challenge that when the same chip is used - // across continuation segments, - // only the first segment will have setup called + // - it would be better if we can constraint this in the circuit, however this has the + // challenge that when the same chip is used across continuation segments, only the + // first segment will have setup called let expected = iter::empty() .chain({ @@ -472,7 +473,8 @@ impl TraceSubRowGenerator for FieldExpr { .collect::>(); let zero = OverflowInt::::from_canonical_unsigned_limbs(vec![0], limb_bits); let mut vars_overflow = vec![zero; self.num_variables]; - // Note: in cases where the prime fits in less limbs than `num_limbs`, we use the smaller number of limbs. + // Note: in cases where the prime fits in less limbs than `num_limbs`, we use the smaller + // number of limbs. let prime_overflow = OverflowInt::::from_biguint(&self.prime, self.limb_bits, None); let constants: Vec<_> = self @@ -493,7 +495,8 @@ impl TraceSubRowGenerator for FieldExpr { vars_overflow[i] = OverflowInt::::from_biguint(&vars[i], self.limb_bits, Some(self.num_limbs)); } - // We need to have all variables computed first because, e.g. constraints[2] might need variables[3]. + // We need to have all variables computed first because, e.g. constraints[2] might need + // variables[3]. 
for i in 0..self.constraints.len() { // expr = q * p let expr_bigint = diff --git a/crates/circuits/mod-builder/src/core_chip.rs b/crates/circuits/mod-builder/src/core_chip.rs index 0499ce961c..30e9c65dbb 100644 --- a/crates/circuits/mod-builder/src/core_chip.rs +++ b/crates/circuits/mod-builder/src/core_chip.rs @@ -35,7 +35,8 @@ pub struct FieldExpressionCoreAir { /// All the opcode indices (including setup) supported by this Air. /// The last one must be the setup opcode if it's a chip needs setup. pub local_opcode_idx: Vec, - /// Opcode flag idx (indices from builder.new_flag()) for all except setup opcode. Empty if single op chip. + /// Opcode flag idx (indices from builder.new_flag()) for all except setup opcode. Empty if + /// single op chip. pub opcode_flag_idx: Vec, // Example 1: 1-op chip EcAdd that needs setup // local_opcode_idx = [0, 2], where 0 is EcAdd, 2 is setup @@ -178,7 +179,8 @@ pub struct FieldExpressionCoreChip { pub name: String, - /// Whether to finalize the trace. True if all-zero rows don't satisfy the constraints (e.g. there is int_add) + /// Whether to finalize the trace. True if all-zero rows don't satisfy the constraints (e.g. + /// there is int_add) pub should_finalize: bool, } @@ -245,8 +247,9 @@ where let local_opcode_idx = opcode.local_opcode_idx(self.air.offset); let mut flags = vec![]; - // If the chip doesn't need setup, (right now) it must be single op chip and thus no flag is needed. - // Otherwise, there is a flag for each opcode and will be derived by is_valid - sum(flags). + // If the chip doesn't need setup, (right now) it must be single op chip and thus no flag is + // needed. Otherwise, there is a flag for each opcode and will be derived by + // is_valid - sum(flags). 
if self.expr().needs_setup() { flags = vec![false; self.air.num_flags()]; self.air diff --git a/crates/circuits/mod-builder/src/field_variable.rs b/crates/circuits/mod-builder/src/field_variable.rs index 4f53e649e8..fcf0c20e79 100644 --- a/crates/circuits/mod-builder/src/field_variable.rs +++ b/crates/circuits/mod-builder/src/field_variable.rs @@ -89,7 +89,8 @@ impl FieldVariable { fn save_if_overflow( a: &mut FieldVariable, // will save this variable if overflow - expr: SymbolicExpr, // the "compute" expression of the result variable. Note that we need to check if constraint overflows + expr: SymbolicExpr, /* the "compute" expression of the result variable. Note that we + * need to check if constraint overflows */ limb_max_abs: usize, // The max abs of limbs of compute expression. ) { if let SymbolicExpr::Var(_) = a.expr { @@ -115,7 +116,8 @@ impl FieldVariable { // TODO[Lun-Kai]: rethink about how should auto-save work. // This implementation requires self and other to be mutable, and might actually mutate them. - // This might surprise the caller or introduce hard bug if the caller clone the FieldVariable and then call this. + // This might surprise the caller or introduce hard bug if the caller clone the FieldVariable + // and then call this. 
pub fn add(&mut self, other: &mut FieldVariable) -> FieldVariable { assert!(Rc::ptr_eq(&self.builder, &other.builder)); let limb_max_fn = |a: &FieldVariable, b: &FieldVariable| a.limb_max_abs + b.limb_max_abs; diff --git a/crates/circuits/mod-builder/src/symbolic_expr.rs b/crates/circuits/mod-builder/src/symbolic_expr.rs index 8febde4cd1..a84bf6504a 100644 --- a/crates/circuits/mod-builder/src/symbolic_expr.rs +++ b/crates/circuits/mod-builder/src/symbolic_expr.rs @@ -222,7 +222,8 @@ impl SymbolicExpr { SymbolicExpr::IntAdd(lhs, s) => { let (lhs_max_pos, lhs_max_neg) = lhs.max_abs(proper_max); let scalar = BigUint::from_usize(s.unsigned_abs()).unwrap(); - // Optimization opportunity: since `s` is a constant, we can likely do better than this bound. + // Optimization opportunity: since `s` is a constant, we can likely do better than + // this bound. (lhs_max_pos + &scalar, lhs_max_neg + &scalar) } SymbolicExpr::IntMul(lhs, s) => { @@ -243,8 +244,8 @@ impl SymbolicExpr { } /// Returns the maximum possible size, in bits, of each limb in `self.expr`. - /// This is already tracked in `FieldVariable`. However when auto saving in `FieldVariable::div`, - /// we need to know it from the `SymbolicExpr` only. + /// This is already tracked in `FieldVariable`. However when auto saving in + /// `FieldVariable::div`, we need to know it from the `SymbolicExpr` only. /// self should be a constraint expr. 
pub fn constraint_limb_max_abs(&self, limb_bits: usize, num_limbs: usize) -> usize { let canonical_limb_max_abs = (1 << limb_bits) - 1; diff --git a/crates/circuits/mod-builder/src/tests.rs b/crates/circuits/mod-builder/src/tests.rs index 4b12850ffe..d217c0c5c2 100644 --- a/crates/circuits/mod-builder/src/tests.rs +++ b/crates/circuits/mod-builder/src/tests.rs @@ -135,8 +135,8 @@ fn test_auto_carry_intmul() { let mut x3 = &mut x1 * &mut x2; // The int_mul below will overflow: // x3 should have max_overflow_bits = 8 + 8 + log2(32) = 21 - // The carry bits = "max_overflow_bits - limb_bits + 1" will exceed 17 if it exceeds 17 + 8 - 1 = 24. - // So it triggers x3 to be saved first. + // The carry bits = "max_overflow_bits - limb_bits + 1" will exceed 17 if it exceeds 17 + 8 - 1 + // = 24. So it triggers x3 to be saved first. let mut x4 = x3.int_mul(9); assert_eq!(x3.expr, SymbolicExpr::Var(0)); x4.save(); @@ -229,7 +229,8 @@ fn test_auto_carry_div() { let x2 = ExprBuilder::new_input(builder.clone()); // The choice of scalar (7) needs to be such that // 1. the denominator 7x^2 doesn't trigger autosave, >=8 doesn't work. - // 2. But doing a division on it triggers autosave, because of division constraint, <= 6 doesn't work. + // 2. But doing a division on it triggers autosave, because of division constraint, <= 6 doesn't + // work. let mut x3 = x1.square().int_mul(7) / x2; x3.save(); @@ -387,9 +388,10 @@ fn test_symbolic_limbs_mul() { Box::new(SymbolicExpr::Var(0)), Box::new(SymbolicExpr::Var(1)), ); - // x * y = pq, q can be up to p so can limbs as p. - // x * y and p * q both have 63 limbs. 
- let expected_q = 32; - let expected_carry = 63; + // x * y = pq, and x,y can be up to 2^256 - 1 so q can be up to ceil((2^256 - 1)^2 / p) which + // has 257 bits, which is 33 limbs x * y has 63 limbs, but p * q can have 64 limbs since q + // is 33 limbs + let expected_q = 33; + let expected_carry = 64; test_symbolic_limbs(expr, expected_q, expected_carry); } diff --git a/crates/circuits/poseidon2-air/src/config.rs b/crates/circuits/poseidon2-air/src/config.rs index 4519784b59..be597c6dc6 100644 --- a/crates/circuits/poseidon2-air/src/config.rs +++ b/crates/circuits/poseidon2-air/src/config.rs @@ -8,7 +8,8 @@ use super::{ BABY_BEAR_POSEIDON2_HALF_FULL_ROUNDS, BABY_BEAR_POSEIDON2_PARTIAL_ROUNDS, POSEIDON2_WIDTH, }; -// Currently only contains round constants, but this struct may contain other configuration parameters in the future. +// Currently only contains round constants, but this struct may contain other configuration +// parameters in the future. #[derive(Clone, Copy, Debug)] pub struct Poseidon2Config { pub constants: Poseidon2Constants, diff --git a/crates/circuits/poseidon2-air/src/lib.rs b/crates/circuits/poseidon2-air/src/lib.rs index db8d825e20..8a51ee88c7 100644 --- a/crates/circuits/poseidon2-air/src/lib.rs +++ b/crates/circuits/poseidon2-air/src/lib.rs @@ -2,7 +2,8 @@ //! get around some complications with field-specific generics associated with Poseidon2. //! Currently it is only intended for use in OpenVM with BabyBear. //! -//! We do not recommend external use of this crate, and suggest using the [p3_poseidon2_air] crate directly. +//! We do not recommend external use of this crate, and suggest using the [p3_poseidon2_air] crate +//! directly. use std::sync::Arc; @@ -39,7 +40,8 @@ pub const BABY_BEAR_POSEIDON2_PARTIAL_ROUNDS: usize = 13; // Currently we only support SBOX_DEGREE = 7 pub const BABY_BEAR_POSEIDON2_SBOX_DEGREE: u64 = 7; -/// `SBOX_REGISTERS` affects the max constraint degree of the AIR. See [p3_poseidon2_air] for more details. 
+/// `SBOX_REGISTERS` affects the max constraint degree of the AIR. See [p3_poseidon2_air] for more +/// details. #[derive(Debug)] pub struct Poseidon2SubChip { // This is Arc purely because Poseidon2Air cannot derive Clone diff --git a/crates/circuits/poseidon2-air/src/permute.rs b/crates/circuits/poseidon2-air/src/permute.rs index d239928cd5..db3ecb0496 100644 --- a/crates/circuits/poseidon2-air/src/permute.rs +++ b/crates/circuits/poseidon2-air/src/permute.rs @@ -18,14 +18,16 @@ pub trait Poseidon2MatrixConfig: Clone + Sync { fn int_diag_m1_matrix() -> [F; WIDTH]; } -/// This type needs to implement GenericPoseidon2LinearLayers generic in F so that our Poseidon2SubAir can also -/// be generic in F, but in reality each implementation of this struct's functions should be field specific. To -/// circumvent this, Poseidon2LinearLayers is generic in F but **currently requires** that F is BabyBear. +/// This type needs to implement GenericPoseidon2LinearLayers generic in F so that our +/// Poseidon2SubAir can also be generic in F, but in reality each implementation of this struct's +/// functions should be field specific. To circumvent this, Poseidon2LinearLayers is generic in F +/// but **currently requires** that F is BabyBear. #[derive(Debug, Clone)] pub struct BabyBearPoseidon2LinearLayers; -// This is the same as the implementation for GenericPoseidon2LinearLayersMonty31 except that we drop the -// clause that FA needs be multipliable by BabyBear. +// This is the same as the implementation for +// GenericPoseidon2LinearLayersMonty31 except +// that we drop the clause that FA needs be multipliable by BabyBear. // TODO[jpw/stephen]: This is clearly not the best way to do this, but it would // require some reworking in plonky3 to get around the generics. 
impl GenericPoseidon2LinearLayers for BabyBearPoseidon2LinearLayers { diff --git a/crates/circuits/primitives/derive/src/lib.rs b/crates/circuits/primitives/derive/src/lib.rs index 1cece4e3fb..47ff1e220a 100644 --- a/crates/circuits/primitives/derive/src/lib.rs +++ b/crates/circuits/primitives/derive/src/lib.rs @@ -38,7 +38,8 @@ pub fn aligned_borrow_derive(input: TokenStream) -> TokenStream { }) .collect::>(); - // Get impl generics (``), type generics (``), where clause (`where T: Clone`) + // Get impl generics (``), type generics (``), where + // clause (`where T: Clone`) let (impl_generics, type_generics, where_clause) = ast.generics.split_for_impl(); let methods = quote! { @@ -331,8 +332,8 @@ pub fn bytes_stateful_derive(input: TokenStream) -> TokenStream { } _ => panic!("Only unnamed fields are supported"), }; - // Use full path ::openvm_circuit... so it can be used either within or outside the vm crate. - // Assume F is already generic of the field. + // Use full path ::openvm_circuit... so it can be used either within or outside the vm + // crate. Assume F is already generic of the field. let mut new_generics = generics.clone(); let where_clause = new_generics.make_where_clause(); where_clause @@ -365,7 +366,8 @@ pub fn bytes_stateful_derive(input: TokenStream) -> TokenStream { (variant_name, field) }) .collect::>(); - // Use full path ::openvm_stark_backend... so it can be used either within or outside the vm crate. + // Use full path ::openvm_stark_backend... so it can be used either within or outside + // the vm crate. 
let (load_state_arms, store_state_arms): (Vec<_>, Vec<_>) = multiunzip(variants.iter().map(|(variant_name, field)| { let field_ty = &field.ty; diff --git a/crates/circuits/primitives/src/assert_less_than/mod.rs b/crates/circuits/primitives/src/assert_less_than/mod.rs index 78f454f36b..53054c713a 100644 --- a/crates/circuits/primitives/src/assert_less_than/mod.rs +++ b/crates/circuits/primitives/src/assert_less_than/mod.rs @@ -131,10 +131,11 @@ impl AssertLtSubAir { }); // constrain that y - x - 1 is equal to the constructed lower value. - // this enforces that the intermediate value is in the range [0, 2^max_bits - 1], which is equivalent to x < y + // this enforces that the intermediate value is in the range [0, 2^max_bits - 1], which is + // equivalent to x < y builder.when(io.count).assert_eq(intermed_val, lower); - // the degree of this constraint is expected to be deg(count) + max(deg(intermed_val), deg(lower)) - // since we are constraining count * intermed_val == count * lower + // the degree of this constraint is expected to be deg(count) + max(deg(intermed_val), + // deg(lower)) since we are constraining count * intermed_val == count * lower } #[inline(always)] @@ -185,7 +186,8 @@ impl SubAir for AssertLtSubAir { impl TraceSubRowGenerator for AssertLtSubAir { /// (range_checker, x, y) - // x, y are u32 because memory records are storing u32 and there would be needless conversions. It also prevents a F: PrimeField32 trait bound. + // x, y are u32 because memory records are storing u32 and there would be needless conversions. + // It also prevents a F: PrimeField32 trait bound. 
type TraceContext<'a> = (&'a VariableRangeCheckerChip, u32, u32); /// lower_decomp type ColsMut<'a> = &'a mut [F]; diff --git a/crates/circuits/primitives/src/bigint/check_carry_mod_to_zero.rs b/crates/circuits/primitives/src/bigint/check_carry_mod_to_zero.rs index e99b4c6a9c..35b00c63b2 100644 --- a/crates/circuits/primitives/src/bigint/check_carry_mod_to_zero.rs +++ b/crates/circuits/primitives/src/bigint/check_carry_mod_to_zero.rs @@ -18,8 +18,8 @@ pub struct CheckCarryModToZeroCols { pub carries: Vec, // We will check that expr - quotient * modulus = 0, which imples expr is 0 mod modulus. - // quotient can be negative, and this means there is no unique way to represent it as limbs but it's fine. - // Each limb will be range-checked to be in [-2^limb_bits, 2^limb_bits). + // quotient can be negative, and this means there is no unique way to represent it as limbs but + // it's fine. Each limb will be range-checked to be in [-2^limb_bits, 2^limb_bits). pub quotient: Vec, } diff --git a/crates/circuits/primitives/src/bigint/mod.rs b/crates/circuits/primitives/src/bigint/mod.rs index 3e57ffa343..83305a2689 100644 --- a/crates/circuits/primitives/src/bigint/mod.rs +++ b/crates/circuits/primitives/src/bigint/mod.rs @@ -36,7 +36,8 @@ impl OverflowInt { } } - // Limbs can be negative. So the max_overflow_bits and limb_max_abs are different from the range check result. + // Limbs can be negative. So the max_overflow_bits and limb_max_abs are different from the range + // check result. 
pub fn from_canonical_signed_limbs(x: Vec, limb_bits: usize) -> OverflowInt { OverflowInt { limbs: x, diff --git a/crates/circuits/primitives/src/bitwise_op_lookup/mod.rs b/crates/circuits/primitives/src/bitwise_op_lookup/mod.rs index bd129dfcae..a9e649f84e 100644 --- a/crates/circuits/primitives/src/bitwise_op_lookup/mod.rs +++ b/crates/circuits/primitives/src/bitwise_op_lookup/mod.rs @@ -251,6 +251,10 @@ impl ChipUsageGetter for SharedBitwiseOperationLookupChip self.0.air_name() } + fn constant_trace_height(&self) -> Option { + self.0.constant_trace_height() + } + fn current_trace_height(&self) -> usize { self.0.current_trace_height() } diff --git a/crates/circuits/primitives/src/encoder/mod.rs b/crates/circuits/primitives/src/encoder/mod.rs index 31309e9865..b162518757 100644 --- a/crates/circuits/primitives/src/encoder/mod.rs +++ b/crates/circuits/primitives/src/encoder/mod.rs @@ -20,7 +20,8 @@ pub struct Encoder { /// The number of flags, excluding the invalid/dummy flag. flag_cnt: usize, /// Maximal degree of the flag expressions. - /// The maximal degree of the equalities in the AIR, however, **is one higher:** that is, `max_flag_degree + 1`. + /// The maximal degree of the equalities in the AIR, however, **is one higher:** that is, + /// `max_flag_degree + 1`. max_flag_degree: u32, /// All possible points in the k-dimensional space that can be used to encode flags pts: Vec>, @@ -34,7 +35,8 @@ impl Encoder { /// The zero point is reserved for the dummy row. /// `max_degree` is the upper bound for the flag expressions, but the `eval` function /// of the encoder itself will use some constraints of degree `max_degree + 1`. - /// `reserve_invalid` indicates if the encoder should reserve the (0, ..., 0) point as an invalid/dummy flag. + /// `reserve_invalid` indicates if the encoder should reserve the (0, ..., 0) point as an + /// invalid/dummy flag. 
pub fn new(cnt: usize, max_degree: u32, reserve_invalid: bool) -> Self { // Calculate binomial coefficient (d+k choose k) to determine how many points we can encode let binomial = |x: u32| { @@ -109,7 +111,8 @@ impl Encoder { expr * denom.inverse() } - /// Get the polynomial expression that equals 1 when the variables encode the flag at index flag_idx + /// Get the polynomial expression that equals 1 when the variables encode the flag at index + /// flag_idx pub fn get_flag_expr( &self, flag_idx: usize, @@ -125,7 +128,8 @@ impl Encoder { self.pts[flag_idx + self.reserve_invalid as usize].clone() } - /// Returns an expression that is 1 if the variables encode a valid flag and 0 if they encode the invalid point + /// Returns an expression that is 1 if the variables encode a valid flag and 0 if they encode + /// the invalid point pub fn is_valid(&self, vars: &[AB::Var]) -> AB::Expr { AB::Expr::ONE - self.expression_for_point::(&self.pts[0], vars) } diff --git a/crates/circuits/primitives/src/is_equal/mod.rs b/crates/circuits/primitives/src/is_equal/mod.rs index c7bdaac139..7a2ec362ab 100644 --- a/crates/circuits/primitives/src/is_equal/mod.rs +++ b/crates/circuits/primitives/src/is_equal/mod.rs @@ -16,8 +16,8 @@ pub struct IsEqualIo { pub y: T, /// The boolean output, constrained to equal (x == y), when `condition != 0`. pub out: T, - /// Constraints only hold when `condition != 0`. When `condition == 0`, setting all trace values - /// to zero still passes the constraints. + /// Constraints only hold when `condition != 0`. When `condition == 0`, setting all trace + /// values to zero still passes the constraints. 
pub condition: T, } diff --git a/crates/circuits/primitives/src/is_equal_array/mod.rs b/crates/circuits/primitives/src/is_equal_array/mod.rs index b6adc4f9fb..586c29333b 100644 --- a/crates/circuits/primitives/src/is_equal_array/mod.rs +++ b/crates/circuits/primitives/src/is_equal_array/mod.rs @@ -14,8 +14,8 @@ pub struct IsEqArrayIo { pub y: [T; NUM], /// The boolean output, constrained to equal (x == y) when `condition != 0`. pub out: T, - /// Constraints only hold when `condition != 0`. When `condition == 0`, setting all trace values - /// to zero still passes the constraints. + /// Constraints only hold when `condition != 0`. When `condition == 0`, setting all trace + /// values to zero still passes the constraints. pub condition: T, } @@ -51,7 +51,8 @@ impl SubAir for IsEqArraySubAir { let mut sum = io.out.clone(); // If x == y: then sum == 1 implies out = 1. // If x != y: then out * (x[i] - y[i]) == 0 implies out = 0. - // to get the sum == 1 to be satisfied, we set diff_inv_marker[i] = (x[i] - y[i])^{-1} at the first index i such that x[i] != y[i]. + // to get the sum == 1 to be satisfied, we set diff_inv_marker[i] = (x[i] - + // y[i])^{-1} at the first index i such that x[i] != y[i]. for (x_i, y_i, inv_marker_i) in izip!(io.x, io.y, diff_inv_marker) { sum += (x_i.clone() - y_i.clone()) * inv_marker_i; builder.assert_zero(io.out.clone() * (x_i - y_i)); diff --git a/crates/circuits/primitives/src/is_less_than/mod.rs b/crates/circuits/primitives/src/is_less_than/mod.rs index b980679416..b4fc8948f8 100644 --- a/crates/circuits/primitives/src/is_less_than/mod.rs +++ b/crates/circuits/primitives/src/is_less_than/mod.rs @@ -75,8 +75,8 @@ pub struct IsLtSubAir { pub bus: VariableRangeCheckerBus, /// The maximum number of bits for the numbers to compare /// Soundness requirement: max_bits <= 29 - /// max_bits > 29 doesn't work: the approach is to decompose and range check `y - x - 1 + 2^max_bits` is non-negative. - /// This requires 2^{max_bits+1} < |F|. 
+ /// max_bits > 29 doesn't work: the approach is to decompose and range check `y - x - 1 + + /// 2^max_bits` is non-negative. This requires 2^{max_bits+1} < |F|. /// When F::bits() = 31, this implies max_bits <= 29. pub max_bits: usize, /// `decomp_limbs = max_bits.div_ceil(bus.range_max_bits)`. @@ -135,7 +135,8 @@ impl IsLtSubAir { let out = out.into(); // constrain that the lower + out * 2^max_bits is the correct intermediate sum let check_val = lower + out.clone() * AB::Expr::from_canonical_usize(1 << self.max_bits); - // the degree of this constraint is expected to be deg(count) + max(deg(intermed_val), deg(lower)) + // the degree of this constraint is expected to be deg(count) + max(deg(intermed_val), + // deg(lower)) builder.when(condition).assert_eq(intermed_val, check_val); builder.assert_bool(out); } diff --git a/crates/circuits/primitives/src/is_less_than/tests.rs b/crates/circuits/primitives/src/is_less_than/tests.rs index 1136da01e9..a9f211b659 100644 --- a/crates/circuits/primitives/src/is_less_than/tests.rs +++ b/crates/circuits/primitives/src/is_less_than/tests.rs @@ -33,8 +33,8 @@ pub struct IsLessThanCols { pub lower_decomp: Vec, } -/// Note that this air has no const generics. The parameters such as `max_bits, decomp_limbs` are all -/// configured in the constructor at runtime. +/// Note that this air has no const generics. The parameters such as `max_bits, decomp_limbs` are +/// all configured in the constructor at runtime. #[derive(Clone, Copy)] pub struct IsLtTestAir(pub IsLtSubAir); @@ -94,7 +94,8 @@ impl IsLessThanChip { } } -// We create a custom struct of mutable references since `IsLessThanCols` cannot derive `AlignedBorrow`. +// We create a custom struct of mutable references since `IsLessThanCols` cannot derive +// `AlignedBorrow`. 
pub struct IsLessThanColsMut<'a, T> { pub x: &'a mut T, pub y: &'a mut T, diff --git a/crates/circuits/primitives/src/is_less_than_array/mod.rs b/crates/circuits/primitives/src/is_less_than_array/mod.rs index f0e8d04c1e..5113a83ad7 100644 --- a/crates/circuits/primitives/src/is_less_than_array/mod.rs +++ b/crates/circuits/primitives/src/is_less_than_array/mod.rs @@ -21,8 +21,8 @@ pub mod tests; pub struct IsLtArrayIo { pub x: [T; NUM], pub y: [T; NUM], - /// The boolean output, constrained to equal (x < y) when `condition != 0`. The less than comparison - /// is done lexicographically. + /// The boolean output, constrained to equal (x < y) when `condition != 0`. The less than + /// comparison is done lexicographically. pub out: T, /// Constraints only hold when `count != 0`. When `count == 0`, setting all trace values /// to zero still passes the constraints. @@ -34,7 +34,8 @@ pub struct IsLtArrayIo { #[derive(AlignedBorrow, Clone, Copy, Debug)] pub struct IsLtArrayAuxCols { // `diff_marker` is filled with 0 except at the lowest index i such that - // `x[i] != y[i]`. If such an `i` exists then it is constrained that `diff_inv = inv(y[i] - x[i])`. + // `x[i] != y[i]`. If such an `i` exists then it is constrained that `diff_inv = inv(y[i] - + // x[i])`. pub diff_marker: [T; NUM], pub diff_inv: T, pub lt_aux: LessThanAuxCols, @@ -135,11 +136,12 @@ impl IsLtArraySubAir { } builder.assert_bool(prefix_sum.clone()); // When condition != 0, - // - If `x != y`, then `prefix_sum = 1` so marker[i] must be nonzero iff - // i is the first index where `x[i] != y[i]`. Constrains that - // `diff_inv * (y[i] - x[i]) = 1` (`diff_val` is non-zero). + // - If `x != y`, then `prefix_sum = 1` so marker[i] must be nonzero iff i is the first + // index where `x[i] != y[i]`. Constrains that `diff_inv * (y[i] - x[i]) = 1` (`diff_val` + // is non-zero). 
// - If `x == y`, then `prefix_sum = 0` and `out == 0` (below) - // - `prefix_sum` cannot be 1 because all diff are zero and it would be impossible to find an inverse. + // - `prefix_sum` cannot be 1 because all diff are zero and it would be impossible to + // find an inverse. builder .when(io.count.clone()) diff --git a/crates/circuits/primitives/src/is_less_than_array/tests.rs b/crates/circuits/primitives/src/is_less_than_array/tests.rs index 4e9f69a1b7..b709ef5b9d 100644 --- a/crates/circuits/primitives/src/is_less_than_array/tests.rs +++ b/crates/circuits/primitives/src/is_less_than_array/tests.rs @@ -66,11 +66,11 @@ impl Air } /// This chip computes whether one tuple is lexicographically less than another. Each element of the -/// tuple has its own max number of bits, given by the limb_bits array. The chip assumes that each limb -/// is within its given max limb_bits. +/// tuple has its own max number of bits, given by the limb_bits array. The chip assumes that each +/// limb is within its given max limb_bits. /// -/// The IsLessThanTupleChip uses the IsLessThanChip as a subchip to check whether individual tuple elements -/// are less than each other. +/// The IsLessThanTupleChip uses the IsLessThanChip as a subchip to check whether individual tuple +/// elements are less than each other. pub struct IsLtArrayChip { pub air: IsLtArrayTestAir, pub range_checker: Arc, diff --git a/crates/circuits/primitives/src/is_zero/mod.rs b/crates/circuits/primitives/src/is_zero/mod.rs index 951d134256..9d2944416c 100644 --- a/crates/circuits/primitives/src/is_zero/mod.rs +++ b/crates/circuits/primitives/src/is_zero/mod.rs @@ -13,8 +13,8 @@ pub struct IsZeroIo { pub x: T, /// The boolean output, constrained to equal (x == 0) when `condition != 0`.. pub out: T, - /// Constraints only hold when `condition != 0`. When `condition == 0`, setting all trace values - /// to zero still passes the constraints. + /// Constraints only hold when `condition != 0`. 
When `condition == 0`, setting all trace + /// values to zero still passes the constraints. pub condition: T, } diff --git a/crates/circuits/primitives/src/lib.rs b/crates/circuits/primitives/src/lib.rs index c03cfd0b90..723cc017f1 100644 --- a/crates/circuits/primitives/src/lib.rs +++ b/crates/circuits/primitives/src/lib.rs @@ -1,5 +1,6 @@ //! This crate contains a collection of primitives for use when building circuits. -//! The primitives are separated into two types: standalone [Air](openvm_stark_backend::p3_air::Air)s and [SubAir]s. +//! The primitives are separated into two types: standalone +//! [Air](openvm_stark_backend::p3_air::Air)s and [SubAir]s. //! //! The following modules contain standalone [Air](openvm_stark_backend::p3_air::Air)s: //! - [range] diff --git a/crates/circuits/primitives/src/range/bus.rs b/crates/circuits/primitives/src/range/bus.rs index 988d1a131c..e7315dd635 100644 --- a/crates/circuits/primitives/src/range/bus.rs +++ b/crates/circuits/primitives/src/range/bus.rs @@ -20,7 +20,8 @@ impl RangeCheckBus { /// Range check that `x` is in the range `[0, 2^max_bits)`. /// - /// This can be used when `2^max_bits < self.range_max` **if `2 * self.range_max` is less than the field modulus**. + /// This can be used when `2^max_bits < self.range_max` **if `2 * self.range_max` is less than + /// the field modulus**. pub fn range_check( &self, x: impl Into, @@ -97,10 +98,13 @@ impl BitsCheckBusInteraction { { let count = count.into(); if self.shift > 0 { - // if 2^max_bits < range_max, then we also range check that `x + (range_max - 2^max_bits) < range_max` + // if 2^max_bits < range_max, then we also range check that `x + (range_max - + // 2^max_bits) < range_max` // - this will hold if `x < 2^max_bits` (necessary) - // - if `x < range_max` then we know the integer value `x.as_canonical_u32() + (range_max - 2^max_bits) < 2*range_max`. 
- // **Assuming that `2*range_max < F::MODULUS`, then additionally knowing `x + (range_max - 2^max_bits) < range_max` implies `x < 2^max_bits`. + // - if `x < range_max` then we know the integer value `x.as_canonical_u32() + + // (range_max - 2^max_bits) < 2*range_max`. **Assuming that `2*range_max < + // F::MODULUS`, then additionally knowing `x + (range_max - 2^max_bits) < range_max` + // implies `x < 2^max_bits`. self.bus.lookup_key( builder, [self.x.clone() + AB::Expr::from_canonical_u32(self.shift)], diff --git a/crates/circuits/primitives/src/range/mod.rs b/crates/circuits/primitives/src/range/mod.rs index 87ffe1cf7f..39dd70aae7 100644 --- a/crates/circuits/primitives/src/range/mod.rs +++ b/crates/circuits/primitives/src/range/mod.rs @@ -1,6 +1,7 @@ //! Range check for a fixed bit size via preprocessed trace. //! -//! Caution: We almost always prefer to use the [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead of this chip. +//! Caution: We almost always prefer to use the +//! [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead of this chip. // Adapted from Valida use core::mem::size_of; diff --git a/crates/circuits/primitives/src/range_gate/mod.rs b/crates/circuits/primitives/src/range_gate/mod.rs index 214994cf17..7c1a877c49 100644 --- a/crates/circuits/primitives/src/range_gate/mod.rs +++ b/crates/circuits/primitives/src/range_gate/mod.rs @@ -1,6 +1,7 @@ //! Range check for a fixed bit size without using preprocessed trace. //! -//! Caution: We almost always prefer to use the [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead of this chip. +//! Caution: We almost always prefer to use the +//! [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead of this chip. 
use std::{ borrow::Borrow, diff --git a/crates/circuits/primitives/src/range_tuple/mod.rs b/crates/circuits/primitives/src/range_tuple/mod.rs index 1133002c33..3d0754cc9a 100644 --- a/crates/circuits/primitives/src/range_tuple/mod.rs +++ b/crates/circuits/primitives/src/range_tuple/mod.rs @@ -1,7 +1,8 @@ //! Range check a tuple simultaneously. //! When you know you want to range check `(x, y)` to `x_bits, y_bits` respectively -//! and `2^{x_bits + y_bits} < ~2^20`, then you can use this chip to do the range check in one interaction -//! versus the two interactions necessary if you were to use [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead. +//! and `2^{x_bits + y_bits} < ~2^20`, then you can use this chip to do the range check in one +//! interaction versus the two interactions necessary if you were to use +//! [VariableRangeCheckerChip](super::var_range::VariableRangeCheckerChip) instead. use std::{ mem::size_of, @@ -231,6 +232,10 @@ impl ChipUsageGetter for SharedRangeTupleCheckerChip { self.0.air_name() } + fn constant_trace_height(&self) -> Option { + self.0.constant_trace_height() + } + fn current_trace_height(&self) -> usize { self.0.current_trace_height() } diff --git a/crates/circuits/primitives/src/sub_air.rs b/crates/circuits/primitives/src/sub_air.rs index c9d4ccd471..56b1435f8b 100644 --- a/crates/circuits/primitives/src/sub_air.rs +++ b/crates/circuits/primitives/src/sub_air.rs @@ -15,8 +15,8 @@ use openvm_stark_backend::p3_air::AirBuilder; /// the `SubAir` from the `Io` part. These `AuxCols` are typically just slices of `AB::Var`. /// /// This trait only owns the constraints, but it is expected that the [TraceSubRowGenerator] trait -/// or some analogous functionality is also implemented so that the trace generation of the `AuxCols` -/// of each row can be done purely in terms of the `Io` part. 
+/// or some analogous functionality is also implemented so that the trace generation of the +/// `AuxCols` of each row can be done purely in terms of the `Io` part. pub trait SubAir { /// Type to define the context, typically in terms of `AB::Expr` that are needed /// to define the SubAir's constraints. @@ -38,12 +38,14 @@ pub trait SubAir { // [jpw] This could be part of SubAir, but I want to keep SubAir to be constraints only pub trait TraceSubRowGenerator { /// The minimal amount of information needed to generate the sub-row of the trace matrix. - /// This type has a lifetime so other context, such as references to other chips, can be provided. + /// This type has a lifetime so other context, such as references to other chips, can be + /// provided. type TraceContext<'a> where Self: 'a; - /// The type for the columns to mutate. Often this can be `&'a mut Cols` if `Cols` is on the stack. - /// For structs that use the heap, this should be a struct that contains mutable slices. + /// The type for the columns to mutate. Often this can be `&'a mut Cols` if `Cols` is on the + /// stack. For structs that use the heap, this should be a struct that contains mutable + /// slices. 
type ColsMut<'a> where Self: 'a; diff --git a/crates/circuits/primitives/src/var_range/mod.rs b/crates/circuits/primitives/src/var_range/mod.rs index c2d36097c3..1ba3f2e776 100644 --- a/crates/circuits/primitives/src/var_range/mod.rs +++ b/crates/circuits/primitives/src/var_range/mod.rs @@ -134,7 +134,8 @@ impl VariableRangeCheckerChip { )] pub fn add_count(&self, value: u32, max_bits: usize) { // index is 2^max_bits + value - 1 + 1 for the extra [0, 0] row - // if each [value, max_bits] is valid, the sends multiset will be exactly the receives multiset + // if each [value, max_bits] is valid, the sends multiset will be exactly the receives + // multiset let idx = (1 << max_bits) + (value as usize); assert!( idx < self.count.len(), @@ -262,6 +263,10 @@ impl ChipUsageGetter for SharedVariableRangeCheckerChip { self.0.air_name() } + fn constant_trace_height(&self) -> Option { + self.0.constant_trace_height() + } + fn current_trace_height(&self) -> usize { self.0.current_trace_height() } diff --git a/crates/circuits/primitives/src/xor/lookup/mod.rs b/crates/circuits/primitives/src/xor/lookup/mod.rs index 3e968c460b..c9e76ad4c9 100644 --- a/crates/circuits/primitives/src/xor/lookup/mod.rs +++ b/crates/circuits/primitives/src/xor/lookup/mod.rs @@ -50,7 +50,8 @@ pub struct XorLookupPreprocessedCols { pub const NUM_XOR_LOOKUP_COLS: usize = size_of::>(); pub const NUM_XOR_LOOKUP_PREPROCESSED_COLS: usize = size_of::>(); -/// Xor via preprocessed lookup table. Can only be used if inputs have less than approximately 10-bits. +/// Xor via preprocessed lookup table. Can only be used if inputs have less than approximately +/// 10-bits. #[derive(Clone, Copy, Debug, derive_new::new)] pub struct XorLookupAir { pub bus: XorBus, @@ -99,7 +100,8 @@ where /// This chip gets requests to compute the xor of two numbers x and y of at most M bits. 
/// It generates a preprocessed table with a row for each possible triple (x, y, x^y) -/// and keeps count of the number of times each triple is requested for the single main trace column. +/// and keeps count of the number of times each triple is requested for the single main trace +/// column. #[derive(Debug)] pub struct XorLookupChip { pub air: XorLookupAir, diff --git a/crates/circuits/sha256-air/src/air.rs b/crates/circuits/sha256-air/src/air.rs index e47aa96504..96578984d0 100644 --- a/crates/circuits/sha256-air/src/air.rs +++ b/crates/circuits/sha256-air/src/air.rs @@ -76,7 +76,8 @@ impl Sha256Air { let main = builder.main(); let local = main.row_slice(0); - // Doesn't matter which column struct we use here as we are only interested in the common columns + // Doesn't matter which column struct we use here as we are only interested in the common + // columns let local_cols: &Sha256DigestCols = local[start_col..start_col + SHA256_DIGEST_WIDTH].borrow(); let flags = &local_cols.flags; @@ -114,8 +115,8 @@ impl Sha256Air { flags.is_padding_row(), ); - // Constrain a, e, being composed of bits: we make sure a and e are always in the same place in the trace matrix - // Note: this has to be true for every row, even padding rows + // Constrain a, e, being composed of bits: we make sure a and e are always in the same place + // in the trace matrix Note: this has to be true for every row, even padding rows for i in 0..SHA256_ROUNDS_PER_ROW { for j in 0..SHA256_WORD_BITS { builder.assert_bool(local_cols.hash.a[i][j]); @@ -134,7 +135,8 @@ impl Sha256Air { local: &Sha256RoundCols, next: &Sha256DigestCols, ) { - // Check that if this is the last row of a message or an inpadding row, the hash should be the [SHA256_H] + // Check that if this is the last row of a message or an inpadding row, the hash should be + // the [SHA256_H] for i in 0..SHA256_ROUNDS_PER_ROW { let a = next.hash.a[i].map(|x| x.into()); let e = next.hash.e[i].map(|x| x.into()); @@ -142,7 +144,8 @@ impl 
Sha256Air { let a_limb = compose::(&a[j * 16..(j + 1) * 16], 1); let e_limb = compose::(&e[j * 16..(j + 1) * 16], 1); - // If it is a padding row or the last row of a message, the `hash` should be the [SHA256_H] + // If it is a padding row or the last row of a message, the `hash` should be the + // [SHA256_H] builder .when( next.flags.is_padding_row() @@ -169,7 +172,8 @@ impl Sha256Air { } } - // Check if last row of a non-last block, the `hash` should be equal to the final hash of the current block + // Check if last row of a non-last block, the `hash` should be equal to the final hash of + // the current block for i in 0..SHA256_ROUNDS_PER_ROW { let prev_a = next.hash.a[i].map(|x| x.into()); let prev_e = next.hash.e[i].map(|x| x.into()); @@ -216,7 +220,8 @@ impl Sha256Air { .when(next.flags.is_digest_row) .assert_bool(carry.clone()); } - // constrain the final hash limbs two at a time since we can do two checks per interaction + // constrain the final hash limbs two at a time since we can do two checks per + // interaction for chunk in next.final_hash[i].chunks(2) { self.bitwise_lookup_bus .send_range(chunk[0], chunk[1]) @@ -237,13 +242,14 @@ impl Sha256Air { next[start_col..start_col + SHA256_ROUND_WIDTH].borrow(); let local_is_padding_row = local_cols.flags.is_padding_row(); - // Note that there will always be a padding row in the trace since the unpadded height is a multiple of 17. - // So the next row is padding iff the current block is the last block in the trace. + // Note that there will always be a padding row in the trace since the unpadded height is a + // multiple of 17. So the next row is padding iff the current block is the last + // block in the trace. let next_is_padding_row = next_cols.flags.is_padding_row(); // We check that the very last block has `is_last_block` set to true, which guarantees that - // there is at least one complete message. 
If other digest rows have `is_last_block` set to true, - // then the trace will be interpreted as containing multiple messages. + // there is at least one complete message. If other digest rows have `is_last_block` set to + // true, then the trace will be interpreted as containing multiple messages. builder .when(next_is_padding_row.clone()) .when(local_cols.flags.is_digest_row) @@ -361,7 +367,8 @@ impl Sha256Air { &self, builder: &mut AB, local: &Sha256DigestCols, - is_last_block_of_trace: AB::Expr, // note this indicates the last block of the trace, not the last block of the message + is_last_block_of_trace: AB::Expr, /* note this indicates the last block of the trace, + * not the last block of the message */ ) { // Constrain that next block's `prev_hash` is equal to the current block's `hash` let composed_hash: [[::Expr; SHA256_WORD_U16S]; SHA256_HASH_WORDS] = @@ -402,9 +409,9 @@ impl Sha256Air { } /// Constrain the message schedule additions for `next` row - /// Note: For every addition we need to constrain the following for each of [SHA256_WORD_U16S] limbs - /// sig_1(w_{t-2})[i] + w_{t-7}[i] + sig_0(w_{t-15})[i] + w_{t-16}[i] + carry_w[t][i-1] - carry_w[t][i] * 2^16 - w_t[i] == 0 - /// Refer to [https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf] + /// Note: For every addition we need to constrain the following for each of [SHA256_WORD_U16S] + /// limbs sig_1(w_{t-2})[i] + w_{t-7}[i] + sig_0(w_{t-15})[i] + w_{t-16}[i] + + /// carry_w[t][i-1] - carry_w[t][i] * 2^16 - w_t[i] == 0 Refer to [https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf] fn eval_message_schedule( &self, builder: &mut AB, @@ -429,12 +436,14 @@ impl Sha256Air { } // Constrain intermed for `next` row - // We will only constrain intermed_12 for rows [3, 14], and let it be unconstrained for other rows - // Other rows should put the needed value in intermed_12 to make the below summation constraint hold + // We will only constrain intermed_12 for rows [3, 14], and let it be 
unconstrained for + // other rows Other rows should put the needed value in intermed_12 to make the + // below summation constraint hold let is_row_3_14 = self .row_idx_encoder .contains_flag_range::(&next.flags.row_idx, 3..=14); - // We will only constrain intermed_8 for rows [2, 13], and let it unconstrained for other rows + // We will only constrain intermed_8 for rows [2, 13], and let it unconstrained for other + // rows let is_row_2_13 = self .row_idx_encoder .contains_flag_range::(&next.flags.row_idx, 2..=13); @@ -447,8 +456,10 @@ impl Sha256Air { let w_idx_limb = compose::(&w_idx[j * 16..(j + 1) * 16], 1); let sig_w_limb = compose::(&sig_w[j * 16..(j + 1) * 16], 1); - // We would like to constrain this only on rows 0..16, but we can't do a conditional check because the degree is already 3. - // So we must fill in `intermed_4` with dummy values on rows 0 and 16 to ensure the constraint holds on these rows. + // We would like to constrain this only on rows 0..16, but we can't do a conditional + // check because the degree is already 3. So we must fill in + // `intermed_4` with dummy values on rows 0 and 16 to ensure the constraint holds on + // these rows. builder.when_transition().assert_eq( next.schedule_helper.intermed_4[i][j], w_idx_limb + sig_w_limb, @@ -485,11 +496,14 @@ impl Sha256Air { }); // Constrain `W_{idx} = sig_1(W_{idx-2}) + W_{idx-7} + sig_0(W_{idx-15}) + W_{idx-16}` - // We would like to constrain this only on rows 4..16, but we can't do a conditional check because the degree of sum is already 3 - // So we must fill in `intermed_12` with dummy values on rows 0..3 and 15 and 16 to ensure the constraint holds on rows - // 0..4 and 16. Note that the dummy value goes in the previous row to make the current row's constraint hold. 
+ // We would like to constrain this only on rows 4..16, but we can't do a conditional + // check because the degree of sum is already 3 So we must fill in + // `intermed_12` with dummy values on rows 0..3 and 15 and 16 to ensure the constraint + // holds on rows 0..4 and 16. Note that the dummy value goes in the previous + // row to make the current row's constraint hold. constraint_word_addition( - // Note: here we can't do a conditional check because the degree of sum is already 3 + // Note: here we can't do a conditional check because the degree of sum is already + // 3 &mut builder.when_transition(), &[&small_sig1_field::(&w[i + 2])], &[&w_7, &intermed_16], @@ -528,8 +542,9 @@ impl Sha256Air { let e = [local.work_vars.e, next.work_vars.e].concat(); for i in 0..SHA256_ROUNDS_PER_ROW { for j in 0..SHA256_WORD_U16S { - // Although we need carry_a <= 6 and carry_e <= 5, constraining carry_a, carry_e in [0, 2^8) is enough - // to prevent overflow and ensure the soundness of the addition we want to check + // Although we need carry_a <= 6 and carry_e <= 5, constraining carry_a, carry_e in + // [0, 2^8) is enough to prevent overflow and ensure the soundness + // of the addition we want to check self.bitwise_lookup_bus .send_range(local.work_vars.carry_a[i][j], local.work_vars.carry_e[i][j]) .eval(builder, local.flags.is_round_row); @@ -556,16 +571,19 @@ impl Sha256Air { }); // Constrain `a = h + sig_1(e) + ch(e, f, g) + K + W + sig_0(a) + Maj(a, b, c)` - // We have to enforce this constraint on all rows since the degree of the constraint is already 3. - // So, we must fill in `carry_a` with dummy values on digest rows to ensure the constraint holds. + // We have to enforce this constraint on all rows since the degree of the constraint is + // already 3. So, we must fill in `carry_a` with dummy values on digest rows + // to ensure the constraint holds. 
constraint_word_addition( builder, &[ - &e[i].map(|x| x.into()), // previous `h` + &e[i].map(|x| x.into()), // previous `h` &big_sig1_field::(&e[i + 3]), // sig_1 of previous `e` - &ch_field::(&e[i + 3], &e[i + 2], &e[i + 1]), // Ch of previous `e`, `f`, `g` - &big_sig0_field::(&a[i + 3]), // sig_0 of previous `a` - &maj_field::(&a[i + 3], &a[i + 2], &a[i + 1]), // Maj of previous a, b, c + &ch_field::(&e[i + 3], &e[i + 2], &e[i + 1]), /* Ch of previous + * `e`, `f`, `g` */ + &big_sig0_field::(&a[i + 3]), // sig_0 of previous `a` + &maj_field::(&a[i + 3], &a[i + 2], &a[i + 1]), /* Maj of previous + * a, b, c */ ], &[&w_limbs, &k_limbs], // K and W &a[i + 4], // new `a` @@ -573,15 +591,18 @@ impl Sha256Air { ); // Constrain `e = d + h + sig_1(e) + ch(e, f, g) + K + W` - // We have to enforce this constraint on all rows since the degree of the constraint is already 3. - // So, we must fill in `carry_e` with dummy values on digest rows to ensure the constraint holds. + // We have to enforce this constraint on all rows since the degree of the constraint is + // already 3. So, we must fill in `carry_e` with dummy values on digest rows + // to ensure the constraint holds. 
constraint_word_addition( builder, &[ - &a[i].map(|x| x.into()), // previous `d` - &e[i].map(|x| x.into()), // previous `h` - &big_sig1_field::(&e[i + 3]), // sig_1 of previous `e` - &ch_field::(&e[i + 3], &e[i + 2], &e[i + 1]), // Ch of previous `e`, `f`, `g` + &a[i].map(|x| x.into()), // previous `d` + &e[i].map(|x| x.into()), // previous `h` + &big_sig1_field::(&e[i + 3]), /* sig_1 of previous + * `e` */ + &ch_field::(&e[i + 3], &e[i + 2], &e[i + 1]), /* Ch of previous + * `e`, `f`, `g` */ ], &[&w_limbs, &k_limbs], // K and W &e[i + 4], // new `e` diff --git a/crates/circuits/sha256-air/src/columns.rs b/crates/circuits/sha256-air/src/columns.rs index 173aca0943..1c735394c3 100644 --- a/crates/circuits/sha256-air/src/columns.rs +++ b/crates/circuits/sha256-air/src/columns.rs @@ -12,8 +12,9 @@ use super::{ /// - First 16 rows use Sha256RoundCols /// - Final row uses Sha256DigestCols /// -/// Note that for soundness, we require that there is always a padding row after the last digest row in the trace. -/// Right now, this is true because the unpadded height is a multiple of 17, and thus not a power of 2. +/// Note that for soundness, we require that there is always a padding row after the last digest row +/// in the trace. Right now, this is true because the unpadded height is a multiple of 17, and thus +/// not a power of 2. /// /// Sha256RoundCols and Sha256DigestCols share the same first 3 fields: /// - flags @@ -22,7 +23,8 @@ use super::{ /// /// This design allows for: /// 1. Common constraints to work on either struct type by accessing these shared fields -/// 2. Specific constraints to use the appropriate struct, with flags helping to do conditional constraints +/// 2. Specific constraints to use the appropriate struct, with flags helping to do conditional +/// constraints /// /// Note that the `Sha256WorkVarsCols` field it is used for different purposes in the two structs. 
#[repr(C)] @@ -60,8 +62,9 @@ pub struct Sha256MessageScheduleCols { /// The message schedule words as 32-bit integers /// The first 16 words will be the message data pub w: [[T; SHA256_WORD_BITS]; SHA256_ROUNDS_PER_ROW], - /// Will be message schedule carries for rows 4..16 and a buffer for rows 0..4 to be used freely by wrapper chips - /// Note: carries are 2 bit numbers represented using 2 cells as individual bits + /// Will be message schedule carries for rows 4..16 and a buffer for rows 0..4 to be used + /// freely by wrapper chips Note: carries are 2 bit numbers represented using 2 cells as + /// individual bits pub carry_or_buffer: [[T; SHA256_WORD_U8S]; SHA256_ROUNDS_PER_ROW], } @@ -87,7 +90,8 @@ pub struct Sha256MessageHelperCols { pub w_3: [[T; SHA256_WORD_U16S]; SHA256_ROUNDS_PER_ROW - 1], /// Here intermediate(i) = w_i + sig_0(w_{i+1}) /// Intermed_t represents the intermediate t rounds ago - /// This is needed to constrain the message schedule, since we can only constrain on two rows at a time + /// This is needed to constrain the message schedule, since we can only constrain on two rows + /// at a time pub intermed_4: [[T; SHA256_WORD_U16S]; SHA256_ROUNDS_PER_ROW], pub intermed_8: [[T; SHA256_WORD_U16S]; SHA256_ROUNDS_PER_ROW], pub intermed_12: [[T; SHA256_WORD_U16S]; SHA256_ROUNDS_PER_ROW], @@ -117,14 +121,16 @@ pub struct Sha256FlagsCols { } impl> Sha256FlagsCols { - // This refers to the padding rows that are added to the air to make the trace length a power of 2. - // Not to be confused with the padding added to messages as part of the SHA hash function. + // This refers to the padding rows that are added to the air to make the trace length a power of + // 2. Not to be confused with the padding added to messages as part of the SHA hash + // function. pub fn is_not_padding_row(&self) -> O { self.is_round_row + self.is_digest_row } - // This refers to the padding rows that are added to the air to make the trace length a power of 2. 
- // Not to be confused with the padding added to messages as part of the SHA hash function. + // This refers to the padding rows that are added to the air to make the trace length a power of + // 2. Not to be confused with the padding added to messages as part of the SHA hash + // function. pub fn is_padding_row(&self) -> O where O: FieldAlgebra, diff --git a/crates/circuits/sha256-air/src/trace.rs b/crates/circuits/sha256-air/src/trace.rs index 3862cc0443..eaf9174f50 100644 --- a/crates/circuits/sha256-air/src/trace.rs +++ b/crates/circuits/sha256-air/src/trace.rs @@ -22,8 +22,9 @@ use crate::{ }; /// The trace generation of SHA256 should be done in two passes. -/// The first pass should do `get_block_trace` for every block and generate the invalid rows through `get_default_row` -/// The second pass should go through all the blocks and call `generate_missing_cells` +/// The first pass should do `get_block_trace` for every block and generate the invalid rows through +/// `get_default_row` The second pass should go through all the blocks and call +/// `generate_missing_cells` impl Sha256Air { /// This function takes the input_message (padding not handled), the previous hash, /// and returns the new hash after processing the block input @@ -37,12 +38,13 @@ impl Sha256Air { new_hash } - /// This function takes a 512-bit chunk of the input message (padding not handled), the previous hash, - /// a flag indicating if it's the last block, the global block index, the local block index, - /// and the buffer values that will be put in rows 0..4. - /// Will populate the given `trace` with the trace of the block, where the width of the trace is `trace_width` - /// and the starting column for the `Sha256Air` is `trace_start_col`. - /// **Note**: this function only generates some of the required trace. Another pass is required, refer to [`Self::generate_missing_cells`] for details. 
+ /// This function takes a 512-bit chunk of the input message (padding not handled), the previous + /// hash, a flag indicating if it's the last block, the global block index, the local block + /// index, and the buffer values that will be put in rows 0..4. + /// Will populate the given `trace` with the trace of the block, where the width of the trace is + /// `trace_width` and the starting column for the `Sha256Air` is `trace_start_col`. + /// **Note**: this function only generates some of the required trace. Another pass is required, + /// refer to [`Self::generate_missing_cells`] for details. #[allow(clippy::too_many_arguments)] pub fn generate_block_trace( &self, @@ -285,14 +287,17 @@ impl Sha256Air { } if i == SHA256_ROWS_PER_BLOCK - 2 { // `next` is a digest row. - // Fill in `carry_a` and `carry_e` with dummy values so the constraints on `a` and `e` hold. + // Fill in `carry_a` and `carry_e` with dummy values so the constraints on `a` and + // `e` hold. Self::generate_carry_ae(local_cols, next_cols); - // Fill in row 16's `intermed_4` with dummy values so the message schedule constraints holds on that row + // Fill in row 16's `intermed_4` with dummy values so the message schedule + // constraints holds on that row Self::generate_intermed_4(local_cols, next_cols); } if i <= 2 { // i is in 0..3. - // Fill in `local.intermed_12` with dummy values so the message schedule constraints hold on rows 1..4. + // Fill in `local.intermed_12` with dummy values so the message schedule constraints + // hold on rows 1..4. 
Self::generate_intermed_12(local_cols, next_cols); } } @@ -320,11 +325,14 @@ impl Sha256Air { row_16[trace_start_col..trace_start_col + SHA256_ROUND_WIDTH].borrow_mut(); let cols_17: &mut Sha256RoundCols = row_17[trace_start_col..trace_start_col + SHA256_ROUND_WIDTH].borrow_mut(); - // Fill in row 15's `intermed_12` with dummy values so the message schedule constraints holds on row 16 + // Fill in row 15's `intermed_12` with dummy values so the message schedule constraints + // holds on row 16 Self::generate_intermed_12(cols_15, cols_16); - // Fill in row 16's `intermed_12` with dummy values so the message schedule constraints holds on the next block's row 0 + // Fill in row 16's `intermed_12` with dummy values so the message schedule constraints + // holds on the next block's row 0 Self::generate_intermed_12(cols_16, cols_17); - // Fill in row 0's `intermed_4` with dummy values so the message schedule constraints holds on that row + // Fill in row 0's `intermed_4` with dummy values so the message schedule constraints holds + // on that row Self::generate_intermed_4(cols_16, cols_17); } @@ -362,8 +370,8 @@ impl Sha256Air { }); } - /// The following functions do the calculations in native field since they will be called on padding rows - /// which can overflow and we need to make sure it matches the AIR constraints + /// The following functions do the calculations in native field since they will be called on + /// padding rows which can overflow and we need to make sure it matches the AIR constraints /// Puts the correct carrys in the `next_row`, the resulting carrys can be out of bound fn generate_carry_ae( local_cols: &Sha256RoundCols, diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 03959bd4e0..a5d96e07a4 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -19,14 +19,14 @@ vergen = { version = "8", default-features = false, features = [ [dependencies] openvm-build = { workspace = true } openvm-transpiler = { workspace = true } 
-openvm-native-recursion = { workspace = true, features = ["static-verifier"] } +openvm-native-recursion = { workspace = true } openvm-sdk = { workspace = true } openvm-stark-sdk.workspace = true openvm-stark-backend.workspace = true aws-sdk-s3 = "1.78" aws-config = "1.5" -tokio = { version = "1.41.1", features = ["rt", "rt-multi-thread", "macros"] } +tokio = { version = "1.43.1", features = ["rt", "rt-multi-thread", "macros"] } clap = { version = "4.5.9", features = ["derive", "env"] } eyre.workspace = true tracing.workspace = true @@ -36,7 +36,19 @@ hex = "0.4.3" target-lexicon = "0.12.15" tempfile = "3.10.1" toml = { workspace = true } +bitcode.workspace = true [features] -default = [] +default = ["parallel", "jemalloc", "evm-verify", "bench-metrics"] +evm-prove = ["openvm-sdk/evm-prove"] +evm-verify = ["evm-prove", "openvm-sdk/evm-verify"] bench-metrics = ["openvm-sdk/bench-metrics"] +# for guest profiling: +profiling = ["openvm-sdk/profiling"] +# performance features: +# (rayon is always imported because of halo2, so "parallel" feature is redundant) +parallel = ["openvm-sdk/parallel"] +mimalloc = ["openvm-sdk/mimalloc"] +jemalloc = ["openvm-sdk/jemalloc"] +jemalloc-prof = ["openvm-sdk/jemalloc-prof"] +nightly-features = ["openvm-sdk/nightly-features"] diff --git a/crates/cli/src/bin/cargo-openvm.rs b/crates/cli/src/bin/cargo-openvm.rs index d61ea71607..d3002fdfdf 100644 --- a/crates/cli/src/bin/cargo-openvm.rs +++ b/crates/cli/src/bin/cargo-openvm.rs @@ -1,7 +1,4 @@ -use cargo_openvm::{ - commands::{BuildCmd, EvmProvingSetupCmd, KeygenCmd, ProveCmd, RunCmd, VerifyCmd}, - OPENVM_VERSION_MESSAGE, -}; +use cargo_openvm::{commands::*, OPENVM_VERSION_MESSAGE}; use clap::{Parser, Subcommand}; use eyre::Result; use openvm_stark_sdk::config::setup_tracing_with_log_level; @@ -27,6 +24,7 @@ pub enum VmCliCommands { Keygen(KeygenCmd), Prove(ProveCmd), Run(RunCmd), + #[cfg(feature = "evm-verify")] Setup(EvmProvingSetupCmd), Verify(VerifyCmd), } @@ -41,6 +39,7 @@ async fn 
main() -> Result<()> { VmCliCommands::Run(cmd) => cmd.run(), VmCliCommands::Keygen(cmd) => cmd.run(), VmCliCommands::Prove(cmd) => cmd.run(), + #[cfg(feature = "evm-verify")] VmCliCommands::Setup(cmd) => cmd.run().await, VmCliCommands::Verify(cmd) => cmd.run(), } diff --git a/crates/cli/src/commands/build.rs b/crates/cli/src/commands/build.rs index 75cd933401..6a2e0f2ba5 100644 --- a/crates/cli/src/commands/build.rs +++ b/crates/cli/src/commands/build.rs @@ -1,6 +1,7 @@ use std::{ - fs::{read, write}, + fs::{create_dir_all, read, write}, path::PathBuf, + sync::Arc, }; use clap::Parser; @@ -18,7 +19,7 @@ use openvm_transpiler::{elf::Elf, openvm_platform::memory::MEM_SIZE}; use crate::{ default::{ DEFAULT_APP_CONFIG_PATH, DEFAULT_APP_EXE_PATH, DEFAULT_COMMITTED_APP_EXE_PATH, - DEFAULT_MANIFEST_DIR, + DEFAULT_EXE_COMMIT_PATH, DEFAULT_MANIFEST_DIR, }, util::read_config_toml_or_default, }; @@ -83,6 +84,13 @@ pub struct BuildArgs { )] pub committed_exe_output: PathBuf, + #[arg( + long, + default_value = DEFAULT_EXE_COMMIT_PATH, + help = "Output path for the exe commit (bn254 commit of committed program)" + )] + pub exe_commit_output: PathBuf, + #[arg(long, default_value = "release", help = "Build profile")] pub profile: String, } @@ -146,10 +154,25 @@ pub(crate) fn build(build_args: &BuildArgs) -> Result> { let exe = Sdk::new().transpile(elf, transpiler)?; let committed_exe = commit_app_exe(app_config.app_fri_params.fri_params, exe.clone()); write_exe_to_file(exe, output_path)?; + + if let Some(parent) = build_args.exe_commit_output.parent() { + create_dir_all(parent)?; + } write( - &build_args.committed_exe_output, + &build_args.exe_commit_output, committed_exe_as_bn254(&committed_exe).value.to_bytes(), )?; + if let Some(parent) = build_args.committed_exe_output.parent() { + create_dir_all(parent)?; + } + let committed_exe = match Arc::try_unwrap(committed_exe) { + Ok(exe) => exe, + Err(_) => return Err(eyre::eyre!("Failed to unwrap committed_exe Arc")), + }; + write( + 
&build_args.committed_exe_output, + bitcode::serialize(&committed_exe)?, + )?; println!( "[openvm] Successfully transpiled to {}", @@ -189,6 +212,7 @@ mod tests { config: PathBuf::from(DEFAULT_APP_CONFIG_PATH), exe_output: PathBuf::from(DEFAULT_APP_EXE_PATH), committed_exe_output: PathBuf::from(DEFAULT_COMMITTED_APP_EXE_PATH), + exe_commit_output: PathBuf::from(DEFAULT_EXE_COMMIT_PATH), profile: "dev".to_string(), target_dir: Some(target_dir.to_path_buf()), }; diff --git a/crates/cli/src/commands/mod.rs b/crates/cli/src/commands/mod.rs index 949084d8f0..d34a45fd29 100644 --- a/crates/cli/src/commands/mod.rs +++ b/crates/cli/src/commands/mod.rs @@ -10,7 +10,9 @@ pub use prove::*; mod run; pub use run::*; +#[cfg(feature = "evm-verify")] mod setup; +#[cfg(feature = "evm-verify")] pub use setup::*; mod verify; diff --git a/crates/cli/src/commands/prove.rs b/crates/cli/src/commands/prove.rs index c2c1be894d..2dbc07add8 100644 --- a/crates/cli/src/commands/prove.rs +++ b/crates/cli/src/commands/prove.rs @@ -2,23 +2,21 @@ use std::{path::PathBuf, sync::Arc}; use clap::Parser; use eyre::Result; -use openvm_native_recursion::halo2::utils::CacheHalo2ParamsReader; use openvm_sdk::{ commit::AppExecutionCommit, config::SdkVmConfig, - fs::{ - read_agg_pk_from_file, read_app_pk_from_file, read_exe_from_file, write_app_proof_to_file, - write_evm_proof_to_file, - }, + fs::{read_app_pk_from_file, read_exe_from_file, write_app_proof_to_file}, keygen::AppProvingKey, NonRootCommittedExe, Sdk, StdIn, }; +#[cfg(feature = "evm-prove")] +use openvm_sdk::{ + config::AggregationTreeConfig, + fs::{read_agg_pk_from_file, write_evm_proof_to_file}, +}; use crate::{ - default::{ - DEFAULT_AGG_PK_PATH, DEFAULT_APP_EXE_PATH, DEFAULT_APP_PK_PATH, DEFAULT_APP_PROOF_PATH, - DEFAULT_EVM_PROOF_PATH, DEFAULT_PARAMS_DIR, - }, + default::*, input::{read_to_stdin, Input}, }; @@ -44,6 +42,7 @@ enum ProveSubCommand { #[arg(long, action, help = "Path to output proof", default_value = DEFAULT_APP_PROOF_PATH)] 
output: PathBuf, }, + #[cfg(feature = "evm-prove")] Evm { #[arg(long, action, help = "Path to app proving key", default_value = DEFAULT_APP_PK_PATH)] app_pk: PathBuf, @@ -56,6 +55,9 @@ enum ProveSubCommand { #[arg(long, action, help = "Path to output proof", default_value = DEFAULT_EVM_PROOF_PATH)] output: PathBuf, + + #[command(flatten)] + agg_tree_config: AggregationTreeConfig, }, } @@ -74,12 +76,18 @@ impl ProveCmd { let app_proof = sdk.generate_app_proof(app_pk, committed_exe, input)?; write_app_proof_to_file(app_proof, output)?; } + #[cfg(feature = "evm-prove")] ProveSubCommand::Evm { app_pk, exe, input, output, + agg_tree_config, } => { + use openvm_native_recursion::halo2::utils::CacheHalo2ParamsReader; + + let mut sdk = sdk; + sdk.set_agg_tree_config(*agg_tree_config); let params_reader = CacheHalo2ParamsReader::new(DEFAULT_PARAMS_DIR); let (app_pk, committed_exe, input) = Self::prepare_execution(&sdk, app_pk, exe, input)?; diff --git a/crates/cli/src/commands/setup.rs b/crates/cli/src/commands/setup.rs index ec0b58cb65..a62faaa0df 100644 --- a/crates/cli/src/commands/setup.rs +++ b/crates/cli/src/commands/setup.rs @@ -11,13 +11,13 @@ use openvm_native_recursion::halo2::utils::CacheHalo2ParamsReader; use openvm_sdk::{ config::AggConfig, fs::{ - write_agg_pk_to_file, write_evm_verifier_to_folder, EVM_VERIFIER_ARTIFACT_FILENAME, - EVM_VERIFIER_SOL_FILENAME, + write_agg_pk_to_file, write_evm_halo2_verifier_to_folder, EVM_HALO2_VERIFIER_BASE_NAME, + EVM_HALO2_VERIFIER_INTERFACE_NAME, EVM_HALO2_VERIFIER_PARENT_NAME, }, DefaultStaticVerifierPvHandler, Sdk, }; -use crate::default::{DEFAULT_AGG_PK_PATH, DEFAULT_PARAMS_DIR, DEFAULT_VERIFIER_FOLDER}; +use crate::default::{DEFAULT_AGG_PK_PATH, DEFAULT_EVM_HALO2_VERIFIER_PATH, DEFAULT_PARAMS_DIR}; #[derive(Parser)] #[command( @@ -29,11 +29,15 @@ pub struct EvmProvingSetupCmd {} impl EvmProvingSetupCmd { pub async fn run(&self) -> Result<()> { if PathBuf::from(DEFAULT_AGG_PK_PATH).exists() - && 
PathBuf::from(DEFAULT_VERIFIER_FOLDER) - .join(EVM_VERIFIER_ARTIFACT_FILENAME) + && PathBuf::from(DEFAULT_EVM_HALO2_VERIFIER_PATH) + .join(EVM_HALO2_VERIFIER_PARENT_NAME) .exists() - && PathBuf::from(DEFAULT_VERIFIER_FOLDER) - .join(EVM_VERIFIER_SOL_FILENAME) + && PathBuf::from(DEFAULT_EVM_HALO2_VERIFIER_PATH) + .join(EVM_HALO2_VERIFIER_BASE_NAME) + .exists() + && PathBuf::from(DEFAULT_EVM_HALO2_VERIFIER_PATH) + .join("interfaces") + .join(EVM_HALO2_VERIFIER_INTERFACE_NAME) .exists() { println!("Aggregation proving key and verifier contract already exist"); @@ -53,13 +57,13 @@ impl EvmProvingSetupCmd { let agg_pk = sdk.agg_keygen(agg_config, ¶ms_reader, &DefaultStaticVerifierPvHandler)?; println!("Generating verifier contract..."); - let verifier = sdk.generate_snark_verifier_contract(¶ms_reader, &agg_pk)?; + let verifier = sdk.generate_halo2_verifier_solidity(¶ms_reader, &agg_pk)?; println!("Writing proving key to file..."); write_agg_pk_to_file(agg_pk, DEFAULT_AGG_PK_PATH)?; println!("Writing verifier contract to file..."); - write_evm_verifier_to_folder(verifier, DEFAULT_VERIFIER_FOLDER)?; + write_evm_halo2_verifier_to_folder(verifier, DEFAULT_EVM_HALO2_VERIFIER_PATH)?; Ok(()) } diff --git a/crates/cli/src/commands/verify.rs b/crates/cli/src/commands/verify.rs index ca7e203a20..6651f3a7f4 100644 --- a/crates/cli/src/commands/verify.rs +++ b/crates/cli/src/commands/verify.rs @@ -3,16 +3,11 @@ use std::path::PathBuf; use clap::Parser; use eyre::Result; use openvm_sdk::{ - fs::{ - read_app_proof_from_file, read_app_vk_from_file, read_evm_proof_from_file, - read_evm_verifier_from_folder, - }, + fs::{read_app_proof_from_file, read_app_vk_from_file}, Sdk, }; -use crate::default::{ - DEFAULT_APP_PROOF_PATH, DEFAULT_APP_VK_PATH, DEFAULT_EVM_PROOF_PATH, DEFAULT_VERIFIER_FOLDER, -}; +use crate::default::*; #[derive(Parser)] #[command(name = "verify", about = "Verify a proof")] @@ -30,6 +25,7 @@ enum VerifySubCommand { #[arg(long, action, help = "Path to app proof", 
default_value = DEFAULT_APP_PROOF_PATH)] proof: PathBuf, }, + #[cfg(feature = "evm-verify")] Evm { #[arg(long, action, help = "Path to EVM proof", default_value = DEFAULT_EVM_PROOF_PATH)] proof: PathBuf, @@ -45,12 +41,17 @@ impl VerifyCmd { let app_proof = read_app_proof_from_file(proof)?; sdk.verify_app_proof(&app_vk, &app_proof)?; } + #[cfg(feature = "evm-verify")] VerifySubCommand::Evm { proof } => { - let evm_verifier = read_evm_verifier_from_folder(DEFAULT_VERIFIER_FOLDER).map_err(|e| { + use openvm_sdk::fs::{ + read_evm_halo2_verifier_from_folder, read_evm_proof_from_file, + }; + + let evm_verifier = read_evm_halo2_verifier_from_folder(DEFAULT_EVM_HALO2_VERIFIER_PATH).map_err(|e| { eyre::eyre!("Failed to read EVM verifier: {}\nPlease run 'cargo openvm evm-proving-setup' first", e) })?; let evm_proof = read_evm_proof_from_file(proof)?; - sdk.verify_evm_proof(&evm_verifier, &evm_proof)?; + sdk.verify_evm_halo2_proof(&evm_verifier, &evm_proof)?; } } Ok(()) diff --git a/crates/cli/src/default.rs b/crates/cli/src/default.rs index 44233b3c7c..3d1e9156ed 100644 --- a/crates/cli/src/default.rs +++ b/crates/cli/src/default.rs @@ -4,12 +4,14 @@ use openvm_stark_sdk::config::FriParameters; pub const DEFAULT_MANIFEST_DIR: &str = "."; pub const DEFAULT_AGG_PK_PATH: &str = concat!(env!("HOME"), "/.openvm/agg.pk"); -pub const DEFAULT_VERIFIER_FOLDER: &str = concat!(env!("HOME"), "/.openvm/"); pub const DEFAULT_PARAMS_DIR: &str = concat!(env!("HOME"), "/.openvm/params/"); +pub const DEFAULT_EVM_HALO2_VERIFIER_PATH: &str = concat!(env!("HOME"), "/.openvm/halo2/"); + pub const DEFAULT_APP_CONFIG_PATH: &str = "./openvm.toml"; pub const DEFAULT_APP_EXE_PATH: &str = "./openvm/app.vmexe"; -pub const DEFAULT_COMMITTED_APP_EXE_PATH: &str = "./openvm/committed_app_exe.bytes"; +pub const DEFAULT_EXE_COMMIT_PATH: &str = "./openvm/exe_commit.bytes"; +pub const DEFAULT_COMMITTED_APP_EXE_PATH: &str = "./openvm/committed_app_exe.bc"; pub const DEFAULT_APP_PK_PATH: &str = "./openvm/app.pk"; 
pub const DEFAULT_APP_VK_PATH: &str = "./openvm/app.vk"; pub const DEFAULT_APP_PROOF_PATH: &str = "./openvm/app.proof"; diff --git a/crates/cli/src/lib.rs b/crates/cli/src/lib.rs index 008db4b54a..edbbb4bc9d 100644 --- a/crates/cli/src/lib.rs +++ b/crates/cli/src/lib.rs @@ -1,7 +1,7 @@ pub mod commands; pub mod default; pub mod input; -mod util; +pub mod util; use std::process::{Command, Stdio}; diff --git a/crates/cli/src/util.rs b/crates/cli/src/util.rs index 7dc76a20d7..19938c64bb 100644 --- a/crates/cli/src/util.rs +++ b/crates/cli/src/util.rs @@ -15,7 +15,7 @@ pub(crate) fn read_to_struct_toml(path: &PathBuf) -> Result Ok(ret) } -pub(crate) fn read_config_toml_or_default(config: &PathBuf) -> Result> { +pub fn read_config_toml_or_default(config: &PathBuf) -> Result> { if config.exists() { read_to_struct_toml(config) } else { diff --git a/crates/continuations/src/verifier/internal/types.rs b/crates/continuations/src/verifier/internal/types.rs index 53b255ac7c..9512518843 100644 --- a/crates/continuations/src/verifier/internal/types.rs +++ b/crates/continuations/src/verifier/internal/types.rs @@ -76,9 +76,9 @@ impl InternalVmVerifierPvs> { pub struct InternalVmVerifierExtraPvs { /// The commitment of the leaf verifier program. pub leaf_verifier_commit: [T; DIGEST_SIZE], - /// For recursion verification, a program need its own commitment, but its own commitment cannot - /// be hardcoded inside the program itself. So the commitment has to be read from external and - /// be committed. + /// For recursion verification, a program need its own commitment, but its own commitment + /// cannot be hardcoded inside the program itself. So the commitment has to be read from + /// external and be committed. 
pub internal_program_commit: [T; DIGEST_SIZE], } diff --git a/crates/continuations/src/verifier/leaf/types.rs b/crates/continuations/src/verifier/leaf/types.rs index 63b41eca3e..16aca7a169 100644 --- a/crates/continuations/src/verifier/leaf/types.rs +++ b/crates/continuations/src/verifier/leaf/types.rs @@ -32,8 +32,8 @@ assert_impl_all!(LeafVmVerifierInput: Serialize, Deseri #[derive(Serialize, Deserialize, Clone, Debug)] pub struct UserPublicValuesRootProof { /// Sibling hashes for proving the merkle root of public values. For a specific VM, the path - /// is constant. So we don't need the boolean which indicates if a node is a left child or right - /// child. + /// is constant. So we don't need the boolean which indicates if a node is a left child or + /// right child. pub sibling_hashes: Vec<[F; DIGEST_SIZE]>, pub public_values_commit: [F; DIGEST_SIZE], } diff --git a/crates/continuations/src/verifier/leaf/vars.rs b/crates/continuations/src/verifier/leaf/vars.rs index 9fcf90d2db..860621f395 100644 --- a/crates/continuations/src/verifier/leaf/vars.rs +++ b/crates/continuations/src/verifier/leaf/vars.rs @@ -19,8 +19,8 @@ use crate::{ #[derive(DslVariable, Clone)] pub struct UserPublicValuesRootProofVariable { /// Sibling hashes for proving the merkle root of public values. For a specific VM, the path - /// is constant. So we don't need the boolean which indicates if a node is a left child or right - /// child. + /// is constant. So we don't need the boolean which indicates if a node is a left child or + /// right child. 
pub sibling_hashes: Array; CHUNK]>, pub public_values_commit: [Felt; CHUNK], } diff --git a/crates/prof/src/aggregate.rs b/crates/prof/src/aggregate.rs index 9d669870f2..0fac284343 100644 --- a/crates/prof/src/aggregate.rs +++ b/crates/prof/src/aggregate.rs @@ -381,4 +381,9 @@ pub const VM_METRIC_NAMES: &[&str] = &[ "quotient_poly_compute_time_ms", "quotient_poly_commit_time_ms", "pcs_opening_time_ms", + "sumcheck_prove_batch_ms", + "gkr_prove_batch_ms", + "gkr_gen_layers_ms", + "gkr_generate_aux", + "gkr_build_instances_ms", ]; diff --git a/crates/prof/src/lib.rs b/crates/prof/src/lib.rs index 65468fd0de..ab5823f21b 100644 --- a/crates/prof/src/lib.rs +++ b/crates/prof/src/lib.rs @@ -109,7 +109,8 @@ impl MetricDb { for label_keys in sorted_keys { if label_keys.contains(&"cycle_tracker_span".to_string()) { - // Skip cycle_tracker_span as it is too long for markdown and visualized in flamegraphs + // Skip cycle_tracker_span as it is too long for markdown and visualized in + // flamegraphs continue; } let metrics_dict = &self.dict_by_label_types[&label_keys]; diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index f3ca81462d..ea687ca7c7 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -34,6 +34,8 @@ openvm-circuit = { workspace = true } openvm-continuations = { workspace = true } openvm = { workspace = true } +alloy-primitives = { workspace = true, optional = true } +alloy-sol-types = { workspace = true, optional = true, features = ["json"] } bitcode = { workspace = true } bon = { workspace = true } derivative = { workspace = true } @@ -49,14 +51,32 @@ clap = { workspace = true, features = ["derive"] } serde_with = { workspace = true, features = ["hex"] } serde_json.workspace = true thiserror.workspace = true +snark-verifier = { workspace = true } +snark-verifier-sdk.workspace = true +tempfile.workspace = true +hex.workspace = true [features] -default = ["parallel"] +default = ["parallel", "jemalloc", "evm-verify"] +evm-prove = 
["openvm-native-recursion/evm-prove"] +evm-verify = [ + "evm-prove", + "openvm-native-recursion/evm-verify", + "dep:alloy-primitives", + "dep:alloy-sol-types", +] bench-metrics = [ "openvm-circuit/bench-metrics", "openvm-native-recursion/bench-metrics", "openvm-native-compiler/bench-metrics", ] +# for guest profiling: profiling = ["openvm-circuit/function-span", "openvm-transpiler/function-span"] -parallel = ["openvm-circuit/parallel"] test-utils = ["openvm-circuit/test-utils"] +# performance features: +# (rayon is always imported because of halo2, so "parallel" feature is redundant) +parallel = ["openvm-circuit/parallel"] +mimalloc = ["openvm-circuit/mimalloc"] +jemalloc = ["openvm-circuit/jemalloc"] +jemalloc-prof = ["openvm-circuit/jemalloc-prof"] +nightly-features = ["openvm-circuit/nightly-features"] diff --git a/crates/sdk/contracts/.gitignore b/crates/sdk/contracts/.gitignore new file mode 100644 index 0000000000..85198aaa55 --- /dev/null +++ b/crates/sdk/contracts/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# Dotenv file +.env diff --git a/crates/sdk/contracts/abi/IOpenVmHalo2Verifier.json b/crates/sdk/contracts/abi/IOpenVmHalo2Verifier.json new file mode 100644 index 0000000000..7ee1df768b --- /dev/null +++ b/crates/sdk/contracts/abi/IOpenVmHalo2Verifier.json @@ -0,0 +1,30 @@ +[ + { + "type": "function", + "name": "verify", + "inputs": [ + { + "name": "publicValues", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "proofData", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "appExeCommit", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "appVmCommit", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [], + "stateMutability": "view" + } +] \ No newline at end of file diff --git a/crates/sdk/contracts/foundry.toml b/crates/sdk/contracts/foundry.toml new 
file mode 100644 index 0000000000..1e28834e74 --- /dev/null +++ b/crates/sdk/contracts/foundry.toml @@ -0,0 +1,31 @@ +[profile.default] +src = "src" +out = "out" +libs = ["lib"] +verbosity = 2 +solc = "0.8.19" +optimizer = true +optimizer_runs = 100000 +evm_version = "shanghai" +show_progress = true +fs_permissions = [{ access = "read", path = "./template"}, { access = "read", path = "./test/helpers/MockDeps.sol"}] +ffi = true + +[profile.default.optimizer_details] + constantOptimizer = false + yul = false + +[fuzz] + runs = 256 + +[fmt] + bracket_spacing = true + int_types = "long" + line_length = 120 + multiline_func_header = "attributes_first" + number_underscore = "thousands" + quote_style = "double" + single_line_statement_blocks = "single" + tab_width = 4 + wrap_comments = false + \ No newline at end of file diff --git a/crates/sdk/contracts/lib/forge-std b/crates/sdk/contracts/lib/forge-std new file mode 160000 index 0000000000..3b20d60d14 --- /dev/null +++ b/crates/sdk/contracts/lib/forge-std @@ -0,0 +1 @@ +Subproject commit 3b20d60d14b343ee4f908cb8079495c07f5e8981 diff --git a/crates/sdk/contracts/src/IOpenVmHalo2Verifier.sol b/crates/sdk/contracts/src/IOpenVmHalo2Verifier.sol new file mode 100644 index 0000000000..ac8292cdae --- /dev/null +++ b/crates/sdk/contracts/src/IOpenVmHalo2Verifier.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IOpenVmHalo2Verifier { + function verify(bytes calldata publicValues, bytes calldata proofData, bytes32 appExeCommit, bytes32 appVmCommit) + external + view; +} diff --git a/crates/sdk/contracts/template/OpenVmHalo2Verifier.sol b/crates/sdk/contracts/template/OpenVmHalo2Verifier.sol new file mode 100644 index 0000000000..f82d641fc5 --- /dev/null +++ b/crates/sdk/contracts/template/OpenVmHalo2Verifier.sol @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import { Halo2Verifier } from "./Halo2Verifier.sol"; +import { IOpenVmHalo2Verifier } from 
"./interfaces/IOpenVmHalo2Verifier.sol"; + +type MemoryPointer is uint256; + +/// @notice This contract provides a thin wrapper around the Halo2 verifier +/// outputted by `snark-verifier`, exposing a more user-friendly interface. +contract OpenVmHalo2Verifier is Halo2Verifier, IOpenVmHalo2Verifier { + /// @dev Invalid public values length + error InvalidPublicValuesLength(uint256 expected, uint256 actual); + + /// @dev Invalid proof data length + error InvalidProofDataLength(uint256 expected, uint256 actual); + + /// @dev Proof verification failed + error ProofVerificationFailed(); + + /// @dev The length of the proof data, in bytes. + uint256 private constant PROOF_DATA_LENGTH = (12 + 43) * 32; + + /// @dev The length of the public values, in bytes. This value is set by + /// OpenVM and is guaranteed to be no larger than 8192. + uint256 private constant PUBLIC_VALUES_LENGTH = {PUBLIC_VALUES_LENGTH}; + + /// @dev The length of the full proof, in bytes + uint256 private constant FULL_PROOF_LENGTH = (12 + 2 + PUBLIC_VALUES_LENGTH + 43) * 32; + + /// @dev The version of OpenVM that generated this verifier. + string public constant OPENVM_VERSION = "{OPENVM_VERSION}"; + + /// @notice A wrapper that constructs the proof into the right format for + /// use with the `snark-verifier` verification. + /// + /// @dev The verifier expected proof format is: + /// proof[..12 * 32]: KZG accumulators + /// proof[12 * 32..13 * 32]: app exe commit + /// proof[13 * 32..14 * 32]: app vm commit + /// proof[14 * 32..(14 + PUBLIC_VALUES_LENGTH) * 32]: publicValues[0..PUBLIC_VALUES_LENGTH] + /// proof[(14 + PUBLIC_VALUES_LENGTH) * 32..]: Proof Suffix + /// + /// @param publicValues The PVs revealed by the OpenVM guest program. + /// @param proofData All components of the proof except the public values and + /// app exe and vm commits. 
The expected format is: + /// `abi.encodePacked(kzgAccumulators, proofSuffix)` + /// @param appExeCommit The commitment to the OpenVM application executable whose execution + /// is being verified. + /// @param appVmCommit The commitment to the VM configuration. + function verify(bytes calldata publicValues, bytes calldata proofData, bytes32 appExeCommit, bytes32 appVmCommit) external view { + if (publicValues.length != PUBLIC_VALUES_LENGTH) revert InvalidPublicValuesLength(PUBLIC_VALUES_LENGTH, publicValues.length); + if (proofData.length != PROOF_DATA_LENGTH) revert InvalidProofDataLength(PROOF_DATA_LENGTH, proofData.length); + + // We will format the public values and construct the full proof payload + // below. + + MemoryPointer proofPtr = _constructProof(publicValues, proofData, appExeCommit, appVmCommit); + + uint256 fullProofLength = FULL_PROOF_LENGTH; + + /// @solidity memory-safe-assembly + assembly { + // Self-call using the proof as calldata + if iszero(staticcall(gas(), address(), proofPtr, fullProofLength, 0, 0)) { + mstore(0x00, 0xd611c318) // ProofVerificationFailed() + revert(0x1c, 0x04) + } + } + } + + /// @dev The assembly code should perform the same function as the following + /// solidity code: + // + /// ```solidity + /// bytes memory proof = + /// abi.encodePacked(proofData[0:0x180], appExeCommit, appVmCommit, publicValuesPayload, proofData[0x180:]); + /// ``` + // + /// where `publicValuesPayload` is a memory payload with each byte in + /// `publicValues` separated into its own `bytes32` word. + /// + /// This function does not clean the memory it allocates. Since it is the + /// only memory write that occurs in the call frame, we know that + /// the memory region cannot have been dirtied. + /// + /// @return proofPtr Memory pointer to the beginning of the constructed + /// proof. This pointer does not follow `bytes memory` semantics. 
+ function _constructProof(bytes calldata publicValues, bytes calldata proofData, bytes32 appExeCommit, bytes32 appVmCommit) + internal + pure + returns (MemoryPointer proofPtr) + { + uint256 fullProofLength = FULL_PROOF_LENGTH; + + // The expected proof format using hex offsets: + // + // proof[..0x180]: KZG accumulators + // proof[0x180..0x1a0]: app exe commit + // proof[0x1a0..0x1c0]: app vm commit + // proof[0x1c0..(0x1c0 + PUBLIC_VALUES_LENGTH * 32)]: publicValues[0..PUBLIC_VALUES_LENGTH] + // proof[(0x1c0 + PUBLIC_VALUES_LENGTH * 32)..]: Proof Suffix + + /// @solidity memory-safe-assembly + assembly { + proofPtr := mload(0x40) + // Allocate the memory as a safety measure. + mstore(0x40, add(proofPtr, fullProofLength)) + + // Copy the KZG accumulators (length 0x180) into the beginning of + // the memory buffer + calldatacopy(proofPtr, proofData.offset, 0x180) + + // Copy the App Exe Commit and App Vm Commit into the memory buffer + mstore(add(proofPtr, 0x180), appExeCommit) + mstore(add(proofPtr, 0x1a0), appVmCommit) + + // Copy the Proof Suffix (length 43 * 32 = 0x560) into the + // end of the memory buffer, leaving PUBLIC_VALUES_LENGTH words in + // between for the publicValuesPayload. + // + // Begin copying from the end of the KZG accumulators in the + // calldata buffer (0x180) + let proofSuffixOffset := add(0x1c0, shl(5, PUBLIC_VALUES_LENGTH)) + calldatacopy(add(proofPtr, proofSuffixOffset), add(proofData.offset, 0x180), 0x560) + + // Copy each byte of the public values into the proof. It copies the + // most significant bytes of public values first. 
+ let publicValuesMemOffset := add(add(proofPtr, 0x1c0), 0x1f) + for { let i := 0 } iszero(eq(i, PUBLIC_VALUES_LENGTH)) { i := add(i, 1) } { + calldatacopy(add(publicValuesMemOffset, shl(5, i)), add(publicValues.offset, i), 0x01) + } + } + } +} diff --git a/crates/sdk/contracts/test/OpenVmHalo2Verifier.t.sol b/crates/sdk/contracts/test/OpenVmHalo2Verifier.t.sol new file mode 100644 index 0000000000..3b57df9829 --- /dev/null +++ b/crates/sdk/contracts/test/OpenVmHalo2Verifier.t.sol @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import { LibString } from "./helpers/LibString.sol"; +import { Test, console2, safeconsole as console } from "forge-std/Test.sol"; +import { IOpenVmHalo2Verifier } from "../src/IOpenVmHalo2Verifier.sol"; + +contract TemplateTest is Test { + bytes proofData; + bytes32 appExeCommit = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; + bytes32 appVmCommit = 0xEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE; + bytes guestPvs; + + uint256 publicValuesLength; + uint256 fullProofWords; + uint256 fullProofLength; + + string _code = vm.readFile("template/OpenVmHalo2Verifier.sol"); + string deps = vm.readFile("test/helpers/MockDeps.sol"); + + function setUp() public { + proofData = new bytes(55 * 32); + for (uint256 i = 0; i < 55; i++) { + for (uint256 j = 0; j < 32; j++) { + proofData[i * 32 + j] = bytes1(uint8(i)); + } + } + } + + /// forge-config: default.fuzz.runs = 10 + function testFuzz_ProofFormat(uint256 _publicValuesLength) public { + publicValuesLength = bound(_publicValuesLength, 1, 10_000); + publicValuesLength = 8; + fullProofWords = (12 + 2 + publicValuesLength + 43); + fullProofLength = fullProofWords * 32; + + guestPvs = new bytes(publicValuesLength); + for (uint256 i = 0; i < publicValuesLength; i++) { + guestPvs[i] = bytes1(uint8(i)); + } + + IOpenVmHalo2Verifier verifier = _compileAndDeployOpenVmVerifier(publicValuesLength); + + (bool success,) = 
address(verifier).delegatecall( + abi.encodeCall(IOpenVmHalo2Verifier.verify, (guestPvs, proofData, appExeCommit, appVmCommit)) + ); + require(success, "Verification failed"); + } + + fallback(bytes calldata proof) external returns (bytes memory) { + bytes memory proofDataExpected = proofData; + + uint256 proofSuffixOffset = 0x1c0 + (32 * publicValuesLength); + + bytes memory kzgAccumulators = proof[0:0x180]; + bytes memory proofSuffix = proof[proofSuffixOffset:]; + bytes memory _proofData = abi.encodePacked(kzgAccumulators, proofSuffix); + + require(keccak256(_proofData) == keccak256(proofDataExpected), "Partial proof mismatch"); + + bytes memory _appExeCommit = proof[0x180:0x1a0]; + bytes memory _appVmCommit = proof[0x1a0:0x1c0]; + + require(bytes32(_appExeCommit) == appExeCommit, "App exe commit mismatch"); + require(bytes32(_appVmCommit) == appVmCommit, "App vm commit mismatch"); + + bytes calldata _guestPvs = proof[0x1c0:0x1c0 + 32 * publicValuesLength]; + for (uint256 i = 0; i < publicValuesLength; ++i) { + uint256 expected = uint256(uint8(guestPvs[i])); + uint256 actual = uint256(bytes32(_guestPvs[i * 32:(i + 1) * 32])); + require(expected == actual, "Guest PVs hash mismatch"); + } + + // Suppress return value warning + assembly { + return(0x00, 0x00) + } + } + + function test_RevertWhen_InvalidPublicValuesLength() public { + publicValuesLength = 32; + IOpenVmHalo2Verifier verifier = _compileAndDeployOpenVmVerifier(publicValuesLength); + + bytes memory invalidPvs = new bytes(0); + bytes4 sig = bytes4(keccak256("InvalidPublicValuesLength(uint256,uint256)")); + + vm.expectRevert(abi.encodeWithSelector(sig, 32, invalidPvs.length)); + verifier.verify(invalidPvs, hex"", bytes32(0), bytes32(0)); + } + + function test_RevertWhen_InvalidProofDataLength() public { + publicValuesLength = 32; + IOpenVmHalo2Verifier verifier = _compileAndDeployOpenVmVerifier(publicValuesLength); + + bytes memory invalidProofData = new bytes(0); + bytes4 sig = 
bytes4(keccak256("InvalidProofDataLength(uint256,uint256)")); + + bytes memory pvs = new bytes(publicValuesLength); + + vm.expectRevert(abi.encodeWithSelector(sig, 55 * 32, invalidProofData.length)); + verifier.verify(pvs, invalidProofData, appExeCommit, appVmCommit); + } + + function test_RevertWhen_ProofVerificationFailed() public { + publicValuesLength = 32; + IOpenVmHalo2Verifier verifier = _compileAndDeployOpenVmVerifier(publicValuesLength); + + bytes memory _proofData = new bytes(55 * 32); + bytes memory pvs = new bytes(publicValuesLength); + + bytes4 sig = bytes4(keccak256("ProofVerificationFailed()")); + + vm.expectRevert(abi.encodeWithSelector(sig)); + verifier.verify(pvs, _proofData, appExeCommit, appVmCommit); + } + + function _compileAndDeployOpenVmVerifier(uint256 _publicValuesLength) + private + returns (IOpenVmHalo2Verifier verifier) + { + string memory code = LibString.replace(_code, "{PUBLIC_VALUES_LENGTH}", LibString.toString(_publicValuesLength)); + + // `code` will look like this: + // + // // SPDX-License-Identifier: MIT + // pragma solidity 0.8.19; + // + // import { Halo2Verifier } ... + // import { IOpenVmHalo2Verifier } ... + // + // contract OpenVmHalo2Verifier { .. } + // + // We want to replace the `import` statements with inlined deps for JIT + // compilation. 
+ string memory inlinedCode = LibString.replace( + code, + "import { Halo2Verifier } from \"./Halo2Verifier.sol\";\nimport { IOpenVmHalo2Verifier } from \"./interfaces/IOpenVmHalo2Verifier.sol\";", + deps + ); + + // Must use solc 0.8.19 + string[] memory commands = new string[](3); + commands[0] = "sh"; + commands[1] = "-c"; + commands[2] = string.concat( + "echo ", + "'", + inlinedCode, + "'", + " | solc --no-optimize-yul --bin --optimize --optimize-runs 100000 - ", + " | awk 'BEGIN{found=0} /:OpenVmHalo2Verifier/ {found=1; next} found && /^Binary:/ {getline; print; exit}'" + ); + + bytes memory compiledVerifier = vm.ffi(commands); + + assembly { + verifier := create(0, add(compiledVerifier, 0x20), mload(compiledVerifier)) + if iszero(extcodesize(verifier)) { revert(0, 0) } + } + } +} diff --git a/crates/sdk/contracts/test/helpers/LibString.sol b/crates/sdk/contracts/test/helpers/LibString.sol new file mode 100644 index 0000000000..f046fa40d0 --- /dev/null +++ b/crates/sdk/contracts/test/helpers/LibString.sol @@ -0,0 +1,1628 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +/// @notice Library for byte related operations. +/// @author Solady (https://github.com/vectorized/solady/blob/main/src/utils/LibBytes.sol) +library LibBytes { + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* STRUCTS */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /// @dev Goated bytes storage struct that totally MOGs, no cap, fr. + /// Uses less gas and bytecode than Solidity's native bytes storage. It's meta af. + /// Packs length with the first 31 bytes if <255 bytes, so it’s mad tight. + struct BytesStorage { + bytes32 _spacer; + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* CONSTANTS */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /// @dev The constant returned when the `search` is not found in the bytes. 
+ uint256 internal constant NOT_FOUND = type(uint256).max; + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* BYTE STORAGE OPERATIONS */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /// @dev Sets the value of the bytes storage `$` to `s`. + function set(BytesStorage storage $, bytes memory s) internal { + /// @solidity memory-safe-assembly + assembly { + let n := mload(s) + let packed := or(0xff, shl(8, n)) + for { let i := 0 } 1 { } { + if iszero(gt(n, 0xfe)) { + i := 0x1f + packed := or(n, shl(8, mload(add(s, i)))) + if iszero(gt(n, i)) { break } + } + let o := add(s, 0x20) + mstore(0x00, $.slot) + for { let p := keccak256(0x00, 0x20) } 1 { } { + sstore(add(p, shr(5, i)), mload(add(o, i))) + i := add(i, 0x20) + if iszero(lt(i, n)) { break } + } + break + } + sstore($.slot, packed) + } + } + + /// @dev Sets the value of the bytes storage `$` to `s`. + function setCalldata(BytesStorage storage $, bytes calldata s) internal { + /// @solidity memory-safe-assembly + assembly { + let packed := or(0xff, shl(8, s.length)) + for { let i := 0 } 1 { } { + if iszero(gt(s.length, 0xfe)) { + i := 0x1f + packed := or(s.length, shl(8, shr(8, calldataload(s.offset)))) + if iszero(gt(s.length, i)) { break } + } + mstore(0x00, $.slot) + for { let p := keccak256(0x00, 0x20) } 1 { } { + sstore(add(p, shr(5, i)), calldataload(add(s.offset, i))) + i := add(i, 0x20) + if iszero(lt(i, s.length)) { break } + } + break + } + sstore($.slot, packed) + } + } + + /// @dev Sets the value of the bytes storage `$` to the empty bytes. + function clear(BytesStorage storage $) internal { + delete $._spacer; + } + + /// @dev Returns whether the value stored is `$` is the empty bytes "". + function isEmpty(BytesStorage storage $) internal view returns (bool) { + return uint256($._spacer) & 0xff == uint256(0); + } + + /// @dev Returns the length of the value stored in `$`. 
+ function length(BytesStorage storage $) internal view returns (uint256 result) { + result = uint256($._spacer); + /// @solidity memory-safe-assembly + assembly { + let n := and(0xff, result) + result := or(mul(shr(8, result), eq(0xff, n)), mul(n, iszero(eq(0xff, n)))) + } + } + + /// @dev Returns the value stored in `$`. + function get(BytesStorage storage $) internal view returns (bytes memory result) { + /// @solidity memory-safe-assembly + assembly { + result := mload(0x40) + let o := add(result, 0x20) + let packed := sload($.slot) + let n := shr(8, packed) + for { let i := 0 } 1 { } { + if iszero(eq(or(packed, 0xff), packed)) { + mstore(o, packed) + n := and(0xff, packed) + i := 0x1f + if iszero(gt(n, i)) { break } + } + mstore(0x00, $.slot) + for { let p := keccak256(0x00, 0x20) } 1 { } { + mstore(add(o, i), sload(add(p, shr(5, i)))) + i := add(i, 0x20) + if iszero(lt(i, n)) { break } + } + break + } + mstore(result, n) // Store the length of the memory. + mstore(add(o, n), 0) // Zeroize the slot after the bytes. + mstore(0x40, add(add(o, n), 0x20)) // Allocate memory. + } + } + + /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/ + /* BYTES OPERATIONS */ + /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/ + + /// @dev Returns `subject` all occurrences of `needle` replaced with `replacement`. + function replace(bytes memory subject, bytes memory needle, bytes memory replacement) + internal + pure + returns (bytes memory result) + { + /// @solidity memory-safe-assembly + assembly { + result := mload(0x40) + let needleLen := mload(needle) + let replacementLen := mload(replacement) + let d := sub(result, subject) // Memory difference. + let i := add(subject, 0x20) // Subject bytes pointer. + mstore(0x00, add(i, mload(subject))) // End of subject. + if iszero(gt(needleLen, mload(subject))) { + let subjectSearchEnd := add(sub(mload(0x00), needleLen), 1) + let h := 0 // The hash of `needle`. 
+ if iszero(lt(needleLen, 0x20)) { h := keccak256(add(needle, 0x20), needleLen) } + let s := mload(add(needle, 0x20)) + for { let m := shl(3, sub(0x20, and(needleLen, 0x1f))) } 1 { } { + let t := mload(i) + // Whether the first `needleLen % 32` bytes of `subject` and `needle` matches. + if iszero(shr(m, xor(t, s))) { + if h { + if iszero(eq(keccak256(i, needleLen), h)) { + mstore(add(i, d), t) + i := add(i, 1) + if iszero(lt(i, subjectSearchEnd)) { break } + continue + } + } + // Copy the `replacement` one word at a time. + for { let j := 0 } 1 { } { + mstore(add(add(i, d), j), mload(add(add(replacement, 0x20), j))) + j := add(j, 0x20) + if iszero(lt(j, replacementLen)) { break } + } + d := sub(add(d, replacementLen), needleLen) + if needleLen { + i := add(i, needleLen) + if iszero(lt(i, subjectSearchEnd)) { break } + continue + } + } + mstore(add(i, d), t) + i := add(i, 1) + if iszero(lt(i, subjectSearchEnd)) { break } + } + } + let end := mload(0x00) + let n := add(sub(d, add(result, 0x20)), end) + // Copy the rest of the bytes one word at a time. + for { } lt(i, end) { i := add(i, 0x20) } { mstore(add(i, d), mload(i)) } + let o := add(i, d) + mstore(o, 0) // Zeroize the slot after the bytes. + mstore(0x40, add(o, 0x20)) // Allocate memory. + mstore(result, n) // Store the length. + } + } + + /// @dev Returns the byte index of the first location of `needle` in `subject`, + /// needleing from left to right, starting from `from`. + /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found. + function indexOf(bytes memory subject, bytes memory needle, uint256 from) internal pure returns (uint256 result) { + /// @solidity memory-safe-assembly + assembly { + result := not(0) // Initialize to `NOT_FOUND`. 
+ for { let subjectLen := mload(subject) } 1 { } { + if iszero(mload(needle)) { + result := from + if iszero(gt(from, subjectLen)) { break } + result := subjectLen + break + } + let needleLen := mload(needle) + let subjectStart := add(subject, 0x20) + + subject := add(subjectStart, from) + let end := add(sub(add(subjectStart, subjectLen), needleLen), 1) + let m := shl(3, sub(0x20, and(needleLen, 0x1f))) + let s := mload(add(needle, 0x20)) + + if iszero(and(lt(subject, end), lt(from, subjectLen))) { break } + + if iszero(lt(needleLen, 0x20)) { + for { let h := keccak256(add(needle, 0x20), needleLen) } 1 { } { + if iszero(shr(m, xor(mload(subject), s))) { + if eq(keccak256(subject, needleLen), h) { + result := sub(subject, subjectStart) + break + } + } + subject := add(subject, 1) + if iszero(lt(subject, end)) { break } + } + break + } + for { } 1 { } { + if iszero(shr(m, xor(mload(subject), s))) { + result := sub(subject, subjectStart) + break + } + subject := add(subject, 1) + if iszero(lt(subject, end)) { break } + } + break + } + } + } + + /// @dev Returns the byte index of the first location of `needle` in `subject`, + /// needleing from left to right. + /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found. + function indexOf(bytes memory subject, bytes memory needle) internal pure returns (uint256) { + return indexOf(subject, needle, 0); + } + + /// @dev Returns the byte index of the first location of `needle` in `subject`, + /// needleing from right to left, starting from `from`. + /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found. + function lastIndexOf(bytes memory subject, bytes memory needle, uint256 from) + internal + pure + returns (uint256 result) + { + /// @solidity memory-safe-assembly + assembly { + for { } 1 { } { + result := not(0) // Initialize to `NOT_FOUND`. 
+ let needleLen := mload(needle) + if gt(needleLen, mload(subject)) { break } + let w := result + + let fromMax := sub(mload(subject), needleLen) + if iszero(gt(fromMax, from)) { from := fromMax } + + let end := add(add(subject, 0x20), w) + subject := add(add(subject, 0x20), from) + if iszero(gt(subject, end)) { break } + // As this function is not too often used, + // we shall simply use keccak256 for smaller bytecode size. + for { let h := keccak256(add(needle, 0x20), needleLen) } 1 { } { + if eq(keccak256(subject, needleLen), h) { + result := sub(subject, add(end, 1)) + break + } + subject := add(subject, w) // `sub(subject, 1)`. + if iszero(gt(subject, end)) { break } + } + break + } + } + } + + /// @dev Returns the byte index of the first location of `needle` in `subject`, + /// needleing from right to left. + /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found. + function lastIndexOf(bytes memory subject, bytes memory needle) internal pure returns (uint256) { + return lastIndexOf(subject, needle, type(uint256).max); + } + + /// @dev Returns true if `needle` is found in `subject`, false otherwise. + function contains(bytes memory subject, bytes memory needle) internal pure returns (bool) { + return indexOf(subject, needle) != NOT_FOUND; + } + + /// @dev Returns whether `subject` starts with `needle`. + function startsWith(bytes memory subject, bytes memory needle) internal pure returns (bool result) { + /// @solidity memory-safe-assembly + assembly { + let n := mload(needle) + // Just using keccak256 directly is actually cheaper. + let t := eq(keccak256(add(subject, 0x20), n), keccak256(add(needle, 0x20), n)) + result := lt(gt(n, mload(subject)), t) + } + } + + /// @dev Returns whether `subject` ends with `needle`. 
+ function endsWith(bytes memory subject, bytes memory needle) internal pure returns (bool result) { + /// @solidity memory-safe-assembly + assembly { + let n := mload(needle) + let notInRange := gt(n, mload(subject)) + // `subject + 0x20 + max(subject.length - needle.length, 0)`. + let t := add(add(subject, 0x20), mul(iszero(notInRange), sub(mload(subject), n))) + // Just using keccak256 directly is actually cheaper. + result := gt(eq(keccak256(t, n), keccak256(add(needle, 0x20), n)), notInRange) + } + } + + /// @dev Returns `subject` repeated `times`. + function repeat(bytes memory subject, uint256 times) internal pure returns (bytes memory result) { + /// @solidity memory-safe-assembly + assembly { + let l := mload(subject) // Subject length. + if iszero(or(iszero(times), iszero(l))) { + result := mload(0x40) + subject := add(subject, 0x20) + let o := add(result, 0x20) + for { } 1 { } { + // Copy the `subject` one word at a time. + for { let j := 0 } 1 { } { + mstore(add(o, j), mload(add(subject, j))) + j := add(j, 0x20) + if iszero(lt(j, l)) { break } + } + o := add(o, l) + times := sub(times, 1) + if iszero(times) { break } + } + mstore(o, 0) // Zeroize the slot after the bytes. + mstore(0x40, add(o, 0x20)) // Allocate memory. + mstore(result, sub(o, add(result, 0x20))) // Store the length. + } + } + } + + /// @dev Returns a copy of `subject` sliced from `start` to `end` (exclusive). + /// `start` and `end` are byte offsets. + function slice(bytes memory subject, uint256 start, uint256 end) internal pure returns (bytes memory result) { + /// @solidity memory-safe-assembly + assembly { + let l := mload(subject) // Subject length. + if iszero(gt(l, end)) { end := l } + if iszero(gt(l, start)) { start := l } + if lt(start, end) { + result := mload(0x40) + let n := sub(end, start) + let i := add(subject, start) + let w := not(0x1f) + // Copy the `subject` one word at a time, backwards. 
+ for { let j := and(add(n, 0x1f), w) } 1 { } { + mstore(add(result, j), mload(add(i, j))) + j := add(j, w) // `sub(j, 0x20)`. + if iszero(j) { break } + } + let o := add(add(result, 0x20), n) + mstore(o, 0) // Zeroize the slot after the bytes. + mstore(0x40, add(o, 0x20)) // Allocate memory. + mstore(result, n) // Store the length. + } + } + } + + /// @dev Returns a copy of `subject` sliced from `start` to the end of the bytes. + /// `start` is a byte offset. + function slice(bytes memory subject, uint256 start) internal pure returns (bytes memory result) { + result = slice(subject, start, type(uint256).max); + } + + /// @dev Returns a copy of `subject` sliced from `start` to `end` (exclusive). + /// `start` and `end` are byte offsets. Faster than Solidity's native slicing. + function sliceCalldata(bytes calldata subject, uint256 start, uint256 end) + internal + pure + returns (bytes calldata result) + { + /// @solidity memory-safe-assembly + assembly { + end := xor(end, mul(xor(end, subject.length), lt(subject.length, end))) + start := xor(start, mul(xor(start, subject.length), lt(subject.length, start))) + result.offset := add(subject.offset, start) + result.length := mul(lt(start, end), sub(end, start)) + } + } + + /// @dev Returns a copy of `subject` sliced from `start` to the end of the bytes. + /// `start` is a byte offset. Faster than Solidity's native slicing. + function sliceCalldata(bytes calldata subject, uint256 start) internal pure returns (bytes calldata result) { + /// @solidity memory-safe-assembly + assembly { + start := xor(start, mul(xor(start, subject.length), lt(subject.length, start))) + result.offset := add(subject.offset, start) + result.length := mul(lt(start, subject.length), sub(subject.length, start)) + } + } + + /// @dev Reduces the size of `subject` to `n`. + /// If `n` is greater than the size of `subject`, this will be a no-op. 
+ function truncate(bytes memory subject, uint256 n) internal pure returns (bytes memory result) { + /// @solidity memory-safe-assembly + assembly { + result := subject + mstore(mul(lt(n, mload(result)), result), n) + } + } + + /// @dev Returns a copy of `subject`, with the length reduced to `n`. + /// If `n` is greater than the size of `subject`, this will be a no-op. + function truncatedCalldata(bytes calldata subject, uint256 n) internal pure returns (bytes calldata result) { + /// @solidity memory-safe-assembly + assembly { + result.offset := subject.offset + result.length := xor(n, mul(xor(n, subject.length), lt(subject.length, n))) + } + } + + /// @dev Returns all the indices of `needle` in `subject`. + /// The indices are byte offsets. + function indicesOf(bytes memory subject, bytes memory needle) internal pure returns (uint256[] memory result) { + /// @solidity memory-safe-assembly + assembly { + let searchLen := mload(needle) + if iszero(gt(searchLen, mload(subject))) { + result := mload(0x40) + let i := add(subject, 0x20) + let o := add(result, 0x20) + let subjectSearchEnd := add(sub(add(i, mload(subject)), searchLen), 1) + let h := 0 // The hash of `needle`. + if iszero(lt(searchLen, 0x20)) { h := keccak256(add(needle, 0x20), searchLen) } + let s := mload(add(needle, 0x20)) + for { let m := shl(3, sub(0x20, and(searchLen, 0x1f))) } 1 { } { + let t := mload(i) + // Whether the first `searchLen % 32` bytes of `subject` and `needle` matches. + if iszero(shr(m, xor(t, s))) { + if h { + if iszero(eq(keccak256(i, searchLen), h)) { + i := add(i, 1) + if iszero(lt(i, subjectSearchEnd)) { break } + continue + } + } + mstore(o, sub(i, add(subject, 0x20))) // Append to `result`. + o := add(o, 0x20) + i := add(i, searchLen) // Advance `i` by `searchLen`. 
+ if searchLen { + if iszero(lt(i, subjectSearchEnd)) { break } + continue + } + } + i := add(i, 1) + if iszero(lt(i, subjectSearchEnd)) { break } + } + mstore(result, shr(5, sub(o, add(result, 0x20)))) // Store the length of `result`. + // Allocate memory for result. + // We allocate one more word, so this array can be recycled for {split}. + mstore(0x40, add(o, 0x20)) + } + } + } + + /// @dev Returns an arrays of bytess based on the `delimiter` inside of the `subject` bytes. + function split(bytes memory subject, bytes memory delimiter) internal pure returns (bytes[] memory result) { + uint256[] memory indices = indicesOf(subject, delimiter); + /// @solidity memory-safe-assembly + assembly { + let w := not(0x1f) + let indexPtr := add(indices, 0x20) + let indicesEnd := add(indexPtr, shl(5, add(mload(indices), 1))) + mstore(add(indicesEnd, w), mload(subject)) + mstore(indices, add(mload(indices), 1)) + for { let prevIndex := 0 } 1 { } { + let index := mload(indexPtr) + mstore(indexPtr, 0x60) + if iszero(eq(index, prevIndex)) { + let element := mload(0x40) + let l := sub(index, prevIndex) + mstore(element, l) // Store the length of the element. + // Copy the `subject` one word at a time, backwards. + for { let o := and(add(l, 0x1f), w) } 1 { } { + mstore(add(element, o), mload(add(add(subject, prevIndex), o))) + o := add(o, w) // `sub(o, 0x20)`. + if iszero(o) { break } + } + mstore(add(add(element, 0x20), l), 0) // Zeroize the slot after the bytes. + // Allocate memory for the length and the bytes, rounded up to a multiple of 32. + mstore(0x40, add(element, and(add(l, 0x3f), w))) + mstore(indexPtr, element) // Store the `element` into the array. 
+ } + prevIndex := add(index, mload(delimiter)) + indexPtr := add(indexPtr, 0x20) + if iszero(lt(indexPtr, indicesEnd)) { break } + } + result := indices + if iszero(mload(delimiter)) { + result := add(indices, 0x20) + mstore(result, sub(mload(indices), 2)) + } + } + } + + /// @dev Returns a concatenated bytes of `a` and `b`. + /// Cheaper than `bytes.concat()` and does not de-align the free memory pointer. + function concat(bytes memory a, bytes memory b) internal pure returns (bytes memory result) { + /// @solidity memory-safe-assembly + assembly { + result := mload(0x40) + let w := not(0x1f) + let aLen := mload(a) + // Copy `a` one word at a time, backwards. + for { let o := and(add(aLen, 0x20), w) } 1 { } { + mstore(add(result, o), mload(add(a, o))) + o := add(o, w) // `sub(o, 0x20)`. + if iszero(o) { break } + } + let bLen := mload(b) + let output := add(result, aLen) + // Copy `b` one word at a time, backwards. + for { let o := and(add(bLen, 0x20), w) } 1 { } { + mstore(add(output, o), mload(add(b, o))) + o := add(o, w) // `sub(o, 0x20)`. + if iszero(o) { break } + } + let totalLen := add(aLen, bLen) + let last := add(add(result, 0x20), totalLen) + mstore(last, 0) // Zeroize the slot after the bytes. + mstore(result, totalLen) // Store the length. + mstore(0x40, add(last, 0x20)) // Allocate memory. + } + } + + /// @dev Returns whether `a` equals `b`. + function eq(bytes memory a, bytes memory b) internal pure returns (bool result) { + /// @solidity memory-safe-assembly + assembly { + result := eq(keccak256(add(a, 0x20), mload(a)), keccak256(add(b, 0x20), mload(b))) + } + } + + /// @dev Returns whether `a` equals `b`, where `b` is a null-terminated small bytes. + function eqs(bytes memory a, bytes32 b) internal pure returns (bool result) { + /// @solidity memory-safe-assembly + assembly { + // These should be evaluated on compile time, as far as possible. + let m := not(shl(7, div(not(iszero(b)), 255))) // `0x7f7f ...`. 
+ let x := not(or(m, or(b, add(m, and(b, m))))) + let r := shl(7, iszero(iszero(shr(128, x)))) + r := or(r, shl(6, iszero(iszero(shr(64, shr(r, x)))))) + r := or(r, shl(5, lt(0xffffffff, shr(r, x)))) + r := or(r, shl(4, lt(0xffff, shr(r, x)))) + r := or(r, shl(3, lt(0xff, shr(r, x)))) + // forgefmt: disable-next-item + result := gt(eq(mload(a), add(iszero(x), xor(31, shr(3, r)))), + xor(shr(add(8, r), b), shr(add(8, r), mload(add(a, 0x20))))) + } + } + + /// @dev Returns 0 if `a == b`, -1 if `a < b`, +1 if `a > b`. + /// If `a` == b[:a.length]`, and `a.length < b.length`, returns -1. + function cmp(bytes memory a, bytes memory b) internal pure returns (int256 result) { + /// @solidity memory-safe-assembly + assembly { + let aLen := mload(a) + let bLen := mload(b) + let n := and(xor(aLen, mul(xor(aLen, bLen), lt(bLen, aLen))), not(0x1f)) + if n { + for { let i := 0x20 } 1 { } { + let x := mload(add(a, i)) + let y := mload(add(b, i)) + if iszero(or(xor(x, y), eq(i, n))) { + i := add(i, 0x20) + continue + } + result := sub(gt(x, y), lt(x, y)) + break + } + } + // forgefmt: disable-next-item + if iszero(result) { + let l := 0x201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201 + let x := and(mload(add(add(a, 0x20), n)), shl(shl(3, byte(sub(aLen, n), l)), not(0))) + let y := and(mload(add(add(b, 0x20), n)), shl(shl(3, byte(sub(bLen, n), l)), not(0))) + result := sub(gt(x, y), lt(x, y)) + if iszero(result) { result := sub(gt(aLen, bLen), lt(aLen, bLen)) } + } + } + } + + /// @dev Directly returns `a` without copying. + function directReturn(bytes memory a) internal pure { + /// @solidity memory-safe-assembly + assembly { + // Assumes that the bytes does not start from the scratch space. + let retStart := sub(a, 0x20) + let retUnpaddedSize := add(mload(a), 0x40) + // Right pad with zeroes. Just in case the bytes is produced + // by a method that doesn't zero right pad. 
+ mstore(add(retStart, retUnpaddedSize), 0) + mstore(retStart, 0x20) // Store the return offset. + // End the transaction, returning the bytes. + return(retStart, and(not(0x1f), add(0x1f, retUnpaddedSize))) + } + } + + /// @dev Directly returns `a` with minimal copying. + function directReturn(bytes[] memory a) internal pure { + /// @solidity memory-safe-assembly + assembly { + let n := mload(a) // `a.length`. + let o := add(a, 0x20) // Start of elements in `a`. + let u := a // Highest memory slot. + let w := not(0x1f) + for { let i := 0 } iszero(eq(i, n)) { i := add(i, 1) } { + let c := add(o, shl(5, i)) // Location of pointer to `a[i]`. + let s := mload(c) // `a[i]`. + let l := mload(s) // `a[i].length`. + let r := and(l, 0x1f) // `a[i].length % 32`. + let z := add(0x20, and(l, w)) // Offset of last word in `a[i]` from `s`. + // If `s` comes before `o`, or `s` is not zero right padded. + if iszero(lt(lt(s, o), or(iszero(r), iszero(shl(shl(3, r), mload(add(s, z))))))) { + let m := mload(0x40) + mstore(m, l) // Copy `a[i].length`. + for { } 1 { } { + mstore(add(m, z), mload(add(s, z))) // Copy `a[i]`, backwards. + z := add(z, w) // `sub(z, 0x20)`. + if iszero(z) { break } + } + let e := add(add(m, 0x20), l) + mstore(e, 0) // Zeroize the slot after the copied bytes. + mstore(0x40, add(e, 0x20)) // Allocate memory. + s := m + } + mstore(c, sub(s, o)) // Convert to calldata offset. + let t := add(l, add(s, 0x20)) + if iszero(lt(t, u)) { u := t } + } + let retStart := add(a, w) // Assumes `a` doesn't start from scratch space. + mstore(retStart, 0x20) // Store the return offset. + return(retStart, add(0x40, sub(u, retStart))) // End the transaction. + } + } + + /// @dev Returns the word at `offset`, without any bounds checks. 
    function load(bytes memory a, uint256 offset) internal pure returns (bytes32 result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(add(add(a, 0x20), offset))
        }
    }

    /// @dev Returns the word at `offset`, without any bounds checks.
    function loadCalldata(bytes calldata a, uint256 offset) internal pure returns (bytes32 result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := calldataload(add(a.offset, offset))
        }
    }

    /// @dev Returns a slice representing a static struct in the calldata. Performs bounds checks.
    function staticStructInCalldata(bytes calldata a, uint256 offset) internal pure returns (bytes calldata result) {
        /// @solidity memory-safe-assembly
        assembly {
            let l := sub(a.length, 0x20)
            result.offset := add(a.offset, offset)
            result.length := sub(a.length, offset)
            // Reverts if the offset or length overflows 64 bits, or `offset > a.length - 0x20`.
            if or(shr(64, or(l, a.offset)), gt(offset, l)) { revert(l, 0x00) }
        }
    }

    /// @dev Returns a slice representing a dynamic struct in the calldata. Performs bounds checks.
    function dynamicStructInCalldata(bytes calldata a, uint256 offset) internal pure returns (bytes calldata result) {
        /// @solidity memory-safe-assembly
        assembly {
            let l := sub(a.length, 0x20)
            let s := calldataload(add(a.offset, offset)) // Relative offset of `result` from `a.offset`.
            result.offset := add(a.offset, s)
            result.length := sub(a.length, s)
            if or(shr(64, or(s, or(l, a.offset))), gt(offset, l)) { revert(l, 0x00) }
        }
    }

    /// @dev Returns bytes in calldata. Performs bounds checks.
    function bytesInCalldata(bytes calldata a, uint256 offset) internal pure returns (bytes calldata result) {
        /// @solidity memory-safe-assembly
        assembly {
            let l := sub(a.length, 0x20)
            let s := calldataload(add(a.offset, offset)) // Relative offset of `result` from `a.offset`.
            result.offset := add(add(a.offset, s), 0x20)
            result.length := calldataload(add(a.offset, s))
            // forgefmt: disable-next-item
            if or(shr(64, or(result.length, or(s, or(l, a.offset)))),
                or(gt(add(s, result.length), l), gt(offset, l))) { revert(l, 0x00) }
        }
    }

    /// @dev Returns empty calldata bytes. For silencing the compiler.
    function emptyCalldata() internal pure returns (bytes calldata result) {
        /// @solidity memory-safe-assembly
        assembly {
            // NOTE(review): `result.offset` is intentionally left unset (zero) —
            // a zero-length slice never dereferences its offset.
            result.length := 0
        }
    }
}

/// @notice Library for converting numbers into strings and other string operations.
/// @author Solady (https://github.com/vectorized/solady/blob/main/src/utils/LibString.sol)
/// @author Modified from Solmate (https://github.com/transmissions11/solmate/blob/main/src/utils/LibString.sol)
///
/// @dev Note:
/// For performance and bytecode compactness, most of the string operations are restricted to
/// byte strings (7-bit ASCII), except where otherwise specified.
/// Usage of byte string operations on charsets with runes spanning two or more bytes
/// can lead to undefined behavior.
library LibString {
    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                          STRUCTS                           */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev Compact string storage struct.
    /// Uses less gas and bytecode than Solidity's native string storage.
    /// Packs the length with the first 31 bytes if the string is shorter than 255 bytes.
    struct StringStorage {
        bytes32 _spacer;
    }

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                       CUSTOM ERRORS                        */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev The length of the output is too small to contain all the hex digits.
    error HexLengthInsufficient();

    /// @dev The length of the string is more than 32 bytes.
    error TooBigForSmallString();

    /// @dev The input string must be a 7-bit ASCII.
    error StringNot7BitASCII();

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                         CONSTANTS                          */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev The constant returned when the `needle` is not found in the string.
    uint256 internal constant NOT_FOUND = type(uint256).max;

    /// @dev Lookup for '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.
    uint128 internal constant ALPHANUMERIC_7_BIT_ASCII = 0x7fffffe07fffffe03ff000000000000;

    /// @dev Lookup for 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.
    uint128 internal constant LETTERS_7_BIT_ASCII = 0x7fffffe07fffffe0000000000000000;

    /// @dev Lookup for 'abcdefghijklmnopqrstuvwxyz'.
    uint128 internal constant LOWERCASE_7_BIT_ASCII = 0x7fffffe000000000000000000000000;

    /// @dev Lookup for 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'.
    uint128 internal constant UPPERCASE_7_BIT_ASCII = 0x7fffffe0000000000000000;

    /// @dev Lookup for '0123456789'.
    uint128 internal constant DIGITS_7_BIT_ASCII = 0x3ff000000000000;

    /// @dev Lookup for '0123456789abcdefABCDEF'.
    uint128 internal constant HEXDIGITS_7_BIT_ASCII = 0x7e0000007e03ff000000000000;

    /// @dev Lookup for '01234567'.
    uint128 internal constant OCTDIGITS_7_BIT_ASCII = 0xff000000000000;

    /// @dev Lookup for '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c'.
    uint128 internal constant PRINTABLE_7_BIT_ASCII = 0x7fffffffffffffffffffffff00003e00;

    /// @dev Lookup for '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'.
    uint128 internal constant PUNCTUATION_7_BIT_ASCII = 0x78000001f8000001fc00fffe00000000;

    /// @dev Lookup for ' \t\n\r\x0b\x0c'.
    uint128 internal constant WHITESPACE_7_BIT_ASCII = 0x100003e00;

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                 STRING STORAGE OPERATIONS                  */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev Sets the value of the string storage `$` to `s`.
    function set(StringStorage storage $, string memory s) internal {
        LibBytes.set(bytesStorage($), bytes(s));
    }

    /// @dev Sets the value of the string storage `$` to `s`.
    function setCalldata(StringStorage storage $, string calldata s) internal {
        LibBytes.setCalldata(bytesStorage($), bytes(s));
    }

    /// @dev Sets the value of the string storage `$` to the empty string.
    function clear(StringStorage storage $) internal {
        delete $._spacer;
    }

    /// @dev Returns whether the value stored in `$` is the empty string "".
    function isEmpty(StringStorage storage $) internal view returns (bool) {
        // The lowest byte of the spacer holds the packed-length marker;
        // zero means nothing has been stored.
        return uint256($._spacer) & 0xff == uint256(0);
    }

    /// @dev Returns the length of the value stored in `$`.
    function length(StringStorage storage $) internal view returns (uint256) {
        return LibBytes.length(bytesStorage($));
    }

    /// @dev Returns the value stored in `$`.
    function get(StringStorage storage $) internal view returns (string memory) {
        return string(LibBytes.get(bytesStorage($)));
    }

    /// @dev Helper to cast `$` to a `BytesStorage`.
    function bytesStorage(StringStorage storage $) internal pure returns (LibBytes.BytesStorage storage casted) {
        /// @solidity memory-safe-assembly
        assembly {
            casted.slot := $.slot
        }
    }

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                     DECIMAL OPERATIONS                     */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev Returns the base 10 decimal representation of `value`.
    function toString(uint256 value) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            // The maximum value of a uint256 contains 78 digits (1 byte per digit), but
            // we allocate 0xa0 bytes to keep the free memory pointer 32-byte word aligned.
            // We will need 1 word for the trailing zeros padding, 1 word for the length,
            // and 3 words for a maximum of 78 digits.
            result := add(mload(0x40), 0x80)
            mstore(0x40, add(result, 0x20)) // Allocate memory.
            mstore(result, 0) // Zeroize the slot after the string.

            let end := result // Cache the end of the memory to calculate the length later.
            let w := not(0) // Tsk.
            // We write the string from rightmost digit to leftmost digit.
            // The following is essentially a do-while loop that also handles the zero case.
            for { let temp := value } 1 { } {
                result := add(result, w) // `sub(result, 1)`.
                // Store the character to the pointer.
                // The ASCII index of the '0' character is 48.
                mstore8(result, add(48, mod(temp, 10)))
                temp := div(temp, 10) // Keep dividing `temp` until zero.
                if iszero(temp) { break }
            }
            let n := sub(end, result)
            result := sub(result, 0x20) // Move the pointer 32 bytes back to make room for the length.
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the base 10 decimal representation of `value`.
    function toString(int256 value) internal pure returns (string memory result) {
        if (value >= 0) return toString(uint256(value));
        unchecked {
            // Two's complement negation; handles `type(int256).min` without overflow.
            result = toString(~uint256(value) + 1);
        }
        /// @solidity memory-safe-assembly
        assembly {
            // We still have some spare memory space on the left,
            // as we have allocated 3 words (96 bytes) for up to 78 digits.
            let n := mload(result) // Load the string length.
            mstore(result, 0x2d) // Store the '-' character.
            result := sub(result, 1) // Move back the string pointer by a byte.
            mstore(result, add(n, 1)) // Update the string length.
        }
    }

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                   HEXADECIMAL OPERATIONS                   */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev Returns the hexadecimal representation of `value`,
    /// left-padded to an input length of `byteCount` bytes.
    /// The output is prefixed with "0x" and encoded using 2 hexadecimal digits per byte,
    /// giving a total length of `byteCount * 2 + 2` bytes.
    /// Reverts if `byteCount` is too small for the output to contain all the digits.
    function toHexString(uint256 value, uint256 byteCount) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(value, byteCount);
        /// @solidity memory-safe-assembly
        assembly {
            let n := add(mload(result), 2) // Compute the length.
            mstore(result, 0x3078) // Store the "0x" prefix.
            result := sub(result, 2) // Move the pointer.
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`,
    /// left-padded to an input length of `byteCount` bytes.
    /// The output is not prefixed with "0x" and is encoded using 2 hexadecimal digits per byte,
    /// giving a total length of `byteCount * 2` bytes.
    /// Reverts if `byteCount` is too small for the output to contain all the digits.
    function toHexStringNoPrefix(uint256 value, uint256 byteCount) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            // We need 0x20 bytes for the trailing zeros padding, `byteCount * 2` bytes
            // for the digits, 0x02 bytes for the prefix, and 0x20 bytes for the length.
            // We add 0x20 to the total and round down to a multiple of 0x20.
            // (0x20 + 0x20 + 0x02 + 0x20) = 0x62.
            result := add(mload(0x40), and(add(shl(1, byteCount), 0x42), not(0x1f)))
            mstore(0x40, add(result, 0x20)) // Allocate memory.
            mstore(result, 0) // Zeroize the slot after the string.

            let end := result // Cache the end to calculate the length later.
            // Store "0123456789abcdef" in scratch space.
            mstore(0x0f, 0x30313233343536373839616263646566)

            let start := sub(result, add(byteCount, byteCount))
            let w := not(1) // Tsk.
            let temp := value
            // We write the string from rightmost digit to leftmost digit.
            // The following is essentially a do-while loop that also handles the zero case.
            for { } 1 { } {
                result := add(result, w) // `sub(result, 2)`.
                mstore8(add(result, 1), mload(and(temp, 15)))
                mstore8(result, mload(and(shr(4, temp), 15)))
                temp := shr(8, temp)
                if iszero(xor(result, start)) { break }
            }
            // If `temp` is nonzero after writing `byteCount` bytes, the value did not fit.
            if temp {
                mstore(0x00, 0x2194895a) // `HexLengthInsufficient()`.
                revert(0x1c, 0x04)
            }
            let n := sub(end, result)
            result := sub(result, 0x20)
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is prefixed with "0x" and encoded using 2 hexadecimal digits per byte.
    /// The output is not left-padded: it uses the minimal even number of hex digits
    /// needed to represent `value` (zero encodes as "0x00").
    function toHexString(uint256 value) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(value);
        /// @solidity memory-safe-assembly
        assembly {
            let n := add(mload(result), 2) // Compute the length.
            mstore(result, 0x3078) // Store the "0x" prefix.
            result := sub(result, 2) // Move the pointer.
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is prefixed with "0x".
    /// The output excludes leading "0" from the `toHexString` output.
    /// `0x00: "0x0", 0x01: "0x1", 0x12: "0x12", 0x123: "0x123"`.
    function toMinimalHexString(uint256 value) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(value);
        /// @solidity memory-safe-assembly
        assembly {
            let o := eq(byte(0, mload(add(result, 0x20))), 0x30) // Whether leading zero is present.
            let n := add(mload(result), 2) // Compute the length.
            mstore(add(result, o), 0x3078) // Store the "0x" prefix, accounting for leading zero.
            result := sub(add(result, o), 2) // Move the pointer, accounting for leading zero.
            mstore(result, sub(n, o)) // Store the length, accounting for leading zero.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output excludes leading "0" from the `toHexStringNoPrefix` output.
    /// `0x00: "0", 0x01: "1", 0x12: "12", 0x123: "123"`.
    function toMinimalHexStringNoPrefix(uint256 value) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(value);
        /// @solidity memory-safe-assembly
        assembly {
            let o := eq(byte(0, mload(add(result, 0x20))), 0x30) // Whether leading zero is present.
            let n := mload(result) // Get the length.
            result := add(result, o) // Move the pointer, accounting for leading zero.
            mstore(result, sub(n, o)) // Store the length, accounting for leading zero.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is encoded using 2 hexadecimal digits per byte.
    /// The output is not left-padded: it uses the minimal even number of hex digits
    /// needed to represent `value` (zero encodes as "00").
    function toHexStringNoPrefix(uint256 value) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            // We need 0x20 bytes for the trailing zeros padding, 0x20 bytes for the length,
            // 0x02 bytes for the prefix, and 0x40 bytes for the digits.
            // The next multiple of 0x20 above (0x20 + 0x20 + 0x02 + 0x40) is 0xa0.
            result := add(mload(0x40), 0x80)
            mstore(0x40, add(result, 0x20)) // Allocate memory.
            mstore(result, 0) // Zeroize the slot after the string.

            let end := result // Cache the end to calculate the length later.
            mstore(0x0f, 0x30313233343536373839616263646566) // Store the "0123456789abcdef" lookup.

            let w := not(1) // Tsk.
            // We write the string from rightmost digit to leftmost digit.
            // The following is essentially a do-while loop that also handles the zero case.
            for { let temp := value } 1 { } {
                result := add(result, w) // `sub(result, 2)`.
                mstore8(add(result, 1), mload(and(temp, 15)))
                mstore8(result, mload(and(shr(4, temp), 15)))
                temp := shr(8, temp)
                if iszero(temp) { break }
            }
            let n := sub(end, result)
            result := sub(result, 0x20)
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is prefixed with "0x", encoded using 2 hexadecimal digits per byte,
    /// and the alphabets are capitalized conditionally according to
    /// https://eips.ethereum.org/EIPS/eip-55
    function toHexStringChecksummed(address value) internal pure returns (string memory result) {
        result = toHexString(value);
        /// @solidity memory-safe-assembly
        assembly {
            let mask := shl(6, div(not(0), 255)) // `0b010000000100000000 ...`
            let o := add(result, 0x22)
            let hashed := and(keccak256(o, 40), mul(34, mask)) // `0b10001000 ... `
            let t := shl(240, 136) // `0b10001000 << 240`
            for { let i := 0 } 1 { } {
                mstore(add(i, i), mul(t, byte(i, hashed)))
                i := add(i, 1)
                if eq(i, 20) { break }
            }
            mstore(o, xor(mload(o), shr(1, and(mload(0x00), and(mload(o), mask)))))
            o := add(o, 0x20)
            mstore(o, xor(mload(o), shr(1, and(mload(0x20), and(mload(o), mask)))))
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is prefixed with "0x" and encoded using 2 hexadecimal digits per byte.
    function toHexString(address value) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(value);
        /// @solidity memory-safe-assembly
        assembly {
            let n := add(mload(result), 2) // Compute the length.
            mstore(result, 0x3078) // Store the "0x" prefix.
            result := sub(result, 2) // Move the pointer.
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hexadecimal representation of `value`.
    /// The output is encoded using 2 hexadecimal digits per byte.
    function toHexStringNoPrefix(address value) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(0x40)
            // Allocate memory.
            // We need 0x20 bytes for the trailing zeros padding, 0x20 bytes for the length,
            // 0x02 bytes for the prefix, and 0x28 bytes for the digits.
            // The next multiple of 0x20 above (0x20 + 0x20 + 0x02 + 0x28) is 0x80.
            mstore(0x40, add(result, 0x80))
            mstore(0x0f, 0x30313233343536373839616263646566) // Store the "0123456789abcdef" lookup.

            result := add(result, 2)
            mstore(result, 40) // Store the length.
            let o := add(result, 0x20)
            mstore(add(o, 40), 0) // Zeroize the slot after the string.
            value := shl(96, value)
            // Hex-encode the 20 address bytes from left to right
            // (byte `i` of the left-aligned value maps to output offset `2 * i`).
            for { let i := 0 } 1 { } {
                let p := add(o, add(i, i))
                let temp := byte(i, value)
                mstore8(add(p, 1), mload(and(temp, 15)))
                mstore8(p, mload(shr(4, temp)))
                i := add(i, 1)
                if eq(i, 20) { break }
            }
        }
    }

    /// @dev Returns the hex encoded string from the raw bytes.
    /// The output is encoded using 2 hexadecimal digits per byte.
    function toHexString(bytes memory raw) internal pure returns (string memory result) {
        result = toHexStringNoPrefix(raw);
        /// @solidity memory-safe-assembly
        assembly {
            let n := add(mload(result), 2) // Compute the length.
            mstore(result, 0x3078) // Store the "0x" prefix.
            result := sub(result, 2) // Move the pointer.
            mstore(result, n) // Store the length.
        }
    }

    /// @dev Returns the hex encoded string from the raw bytes.
    /// The output is encoded using 2 hexadecimal digits per byte.
    function toHexStringNoPrefix(bytes memory raw) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            let n := mload(raw)
            result := add(mload(0x40), 2) // Skip 2 bytes for the optional prefix.
            mstore(result, add(n, n)) // Store the length of the output.

            mstore(0x0f, 0x30313233343536373839616263646566) // Store the "0123456789abcdef" lookup.
            let o := add(result, 0x20)
            let end := add(raw, n)
            for { } iszero(eq(raw, end)) { } {
                raw := add(raw, 1)
                mstore8(add(o, 1), mload(and(mload(raw), 15)))
                mstore8(o, mload(and(shr(4, mload(raw)), 15)))
                o := add(o, 2)
            }
            mstore(o, 0) // Zeroize the slot after the string.
            mstore(0x40, add(o, 0x20)) // Allocate memory.
        }
    }

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                   RUNE STRING OPERATIONS                   */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    /// @dev Returns the number of UTF characters in the string.
    function runeCount(string memory s) internal pure returns (uint256 result) {
        /// @solidity memory-safe-assembly
        assembly {
            if mload(s) {
                mstore(0x00, div(not(0), 255))
                // Lookup of UTF-8 rune byte-lengths, indexed by the top 6 bits of the lead byte.
                mstore(0x20, 0x0202020202020202020202020202020202020202020202020303030304040506)
                let o := add(s, 0x20)
                let end := add(o, mload(s))
                for { result := 1 } 1 { result := add(result, 1) } {
                    o := add(o, byte(0, mload(shr(250, mload(o)))))
                    if iszero(lt(o, end)) { break }
                }
            }
        }
    }

    /// @dev Returns if this string is a 7-bit ASCII string.
    /// (i.e.
    /// all character codes are in [0..127])
    function is7BitASCII(string memory s) internal pure returns (bool result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := 1
            let mask := shl(7, div(not(0), 255))
            let n := mload(s)
            if n {
                let o := add(s, 0x20)
                let end := add(o, n)
                // Temporarily zero the word past the end so the final
                // (possibly partial) word compares clean; restored below.
                let last := mload(end)
                mstore(end, 0)
                for { } 1 { } {
                    if and(mask, mload(o)) {
                        result := 0
                        break
                    }
                    o := add(o, 0x20)
                    if iszero(lt(o, end)) { break }
                }
                mstore(end, last)
            }
        }
    }

    /// @dev Returns if this string is a 7-bit ASCII string,
    /// AND all characters are in the `allowed` lookup.
    /// Note: If `s` is empty, returns true regardless of `allowed`.
    function is7BitASCII(string memory s, uint128 allowed) internal pure returns (bool result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := 1
            if mload(s) {
                let allowed_ := shr(128, shl(128, allowed))
                let o := add(s, 0x20)
                for { let end := add(o, mload(s)) } 1 { } {
                    result := and(result, shr(byte(0, mload(o)), allowed_))
                    o := add(o, 1)
                    if iszero(and(result, lt(o, end))) { break }
                }
            }
        }
    }

    /// @dev Converts the bytes in the 7-bit ASCII string `s` to
    /// an allowed lookup for use in `is7BitASCII(s, allowed)`.
    /// To save runtime gas, you can cache the result in an immutable variable.
    function to7BitASCIIAllowedLookup(string memory s) internal pure returns (uint128 result) {
        /// @solidity memory-safe-assembly
        assembly {
            if mload(s) {
                let o := add(s, 0x20)
                for { let end := add(o, mload(s)) } 1 { } {
                    result := or(result, shl(byte(0, mload(o)), 1))
                    o := add(o, 1)
                    if iszero(lt(o, end)) { break }
                }
                // Any byte >= 128 sets a bit above the uint128 range.
                if shr(128, result) {
                    mstore(0x00, 0xc9807e0d) // `StringNot7BitASCII()`.
                    revert(0x1c, 0x04)
                }
            }
        }
    }

    /*´:°•.°+.*•´.*:˚.°*.˚•´.°:°•.°•.*•´.*:˚.°*.˚•´.°:°•.°+.*•´.*:*/
    /*                  BYTE STRING OPERATIONS                    */
    /*.•°:°.´+˚.*°.˚:*.´•*.+°.•°:´*.´•*.•°.•°:°.´:•˚°.*°.˚:*.´+°.•*/

    // For performance and bytecode compactness, byte string operations are restricted
    // to 7-bit ASCII strings. All offsets are byte offsets, not UTF character offsets.
    // Usage of byte string operations on charsets with runes spanning two or more bytes
    // can lead to undefined behavior.

    /// @dev Returns `subject` with all occurrences of `needle` replaced with `replacement`.
    function replace(string memory subject, string memory needle, string memory replacement)
        internal
        pure
        returns (string memory)
    {
        return string(LibBytes.replace(bytes(subject), bytes(needle), bytes(replacement)));
    }

    /// @dev Returns the byte index of the first location of `needle` in `subject`,
    /// searching from left to right, starting from `from`.
    /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found.
    function indexOf(string memory subject, string memory needle, uint256 from) internal pure returns (uint256) {
        return LibBytes.indexOf(bytes(subject), bytes(needle), from);
    }

    /// @dev Returns the byte index of the first location of `needle` in `subject`,
    /// searching from left to right.
    /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found.
    function indexOf(string memory subject, string memory needle) internal pure returns (uint256) {
        return LibBytes.indexOf(bytes(subject), bytes(needle), 0);
    }

    /// @dev Returns the byte index of the first location of `needle` in `subject`,
    /// searching from right to left, starting from `from`.
    /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found.
    function lastIndexOf(string memory subject, string memory needle, uint256 from) internal pure returns (uint256) {
        return LibBytes.lastIndexOf(bytes(subject), bytes(needle), from);
    }

    /// @dev Returns the byte index of the first location of `needle` in `subject`,
    /// searching from right to left.
    /// Returns `NOT_FOUND` (i.e. `type(uint256).max`) if the `needle` is not found.
    function lastIndexOf(string memory subject, string memory needle) internal pure returns (uint256) {
        return LibBytes.lastIndexOf(bytes(subject), bytes(needle), type(uint256).max);
    }

    /// @dev Returns true if `needle` is found in `subject`, false otherwise.
    function contains(string memory subject, string memory needle) internal pure returns (bool) {
        return LibBytes.contains(bytes(subject), bytes(needle));
    }

    /// @dev Returns whether `subject` starts with `needle`.
    function startsWith(string memory subject, string memory needle) internal pure returns (bool) {
        return LibBytes.startsWith(bytes(subject), bytes(needle));
    }

    /// @dev Returns whether `subject` ends with `needle`.
    function endsWith(string memory subject, string memory needle) internal pure returns (bool) {
        return LibBytes.endsWith(bytes(subject), bytes(needle));
    }

    /// @dev Returns `subject` repeated `times`.
    function repeat(string memory subject, uint256 times) internal pure returns (string memory) {
        return string(LibBytes.repeat(bytes(subject), times));
    }

    /// @dev Returns a copy of `subject` sliced from `start` to `end` (exclusive).
    /// `start` and `end` are byte offsets.
    function slice(string memory subject, uint256 start, uint256 end) internal pure returns (string memory) {
        return string(LibBytes.slice(bytes(subject), start, end));
    }

    /// @dev Returns a copy of `subject` sliced from `start` to the end of the string.
    /// `start` is a byte offset.
    function slice(string memory subject, uint256 start) internal pure returns (string memory) {
        return string(LibBytes.slice(bytes(subject), start, type(uint256).max));
    }

    /// @dev Returns all the indices of `needle` in `subject`.
    /// The indices are byte offsets.
    function indicesOf(string memory subject, string memory needle) internal pure returns (uint256[] memory) {
        return LibBytes.indicesOf(bytes(subject), bytes(needle));
    }

    /// @dev Returns an array of strings based on the `delimiter` inside of the `subject` string.
    function split(string memory subject, string memory delimiter) internal pure returns (string[] memory result) {
        bytes[] memory a = LibBytes.split(bytes(subject), bytes(delimiter));
        /// @solidity memory-safe-assembly
        assembly {
            // `bytes[]` and `string[]` share the same memory layout; reinterpret in place.
            result := a
        }
    }

    /// @dev Returns a concatenated string of `a` and `b`.
    /// Cheaper than `string.concat()` and does not de-align the free memory pointer.
    function concat(string memory a, string memory b) internal pure returns (string memory) {
        return string(LibBytes.concat(bytes(a), bytes(b)));
    }

    /// @dev Returns a copy of the string in either lowercase or UPPERCASE.
    /// WARNING! This function is only compatible with 7-bit ASCII strings.
    function toCase(string memory subject, bool toUpper) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            let n := mload(subject)
            if n {
                result := mload(0x40)
                let o := add(result, 0x20)
                let d := sub(subject, result)
                // Bit-flags selecting which byte values get their case bit (0x20) toggled.
                let flags := shl(add(70, shl(5, toUpper)), 0x3ffffff)
                for { let end := add(o, n) } 1 { } {
                    let b := byte(0, mload(add(d, o)))
                    mstore8(o, xor(and(shr(b, flags), 0x20), b))
                    o := add(o, 1)
                    if eq(o, end) { break }
                }
                mstore(result, n) // Store the length.
                mstore(o, 0) // Zeroize the slot after the string.
                mstore(0x40, add(o, 0x20)) // Allocate memory.
            }
        }
    }

    /// @dev Returns a string from a small bytes32 string.
    /// `s` must be null-terminated, or behavior will be undefined.
    function fromSmallString(bytes32 s) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(0x40)
            let n := 0
            for { } byte(n, s) { n := add(n, 1) } { } // Scan for '\0'.
            mstore(result, n) // Store the length.
            let o := add(result, 0x20)
            mstore(o, s) // Store the bytes of the string.
            mstore(add(o, n), 0) // Zeroize the slot after the string.
            mstore(0x40, add(result, 0x40)) // Allocate memory.
        }
    }

    /// @dev Returns the small string, with all bytes after the first null byte zeroized.
    function normalizeSmallString(bytes32 s) internal pure returns (bytes32 result) {
        /// @solidity memory-safe-assembly
        assembly {
            for { } byte(result, s) { result := add(result, 1) } { } // Scan for '\0'.
            // Write `s` to scratch, zero everything from the null byte onwards, reload.
            mstore(0x00, s)
            mstore(result, 0x00)
            result := mload(0x00)
        }
    }

    /// @dev Returns the string as a normalized null-terminated small string.
    function toSmallString(string memory s) internal pure returns (bytes32 result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(s)
            if iszero(lt(result, 33)) {
                mstore(0x00, 0xec92f9a3) // `TooBigForSmallString()`.
                revert(0x1c, 0x04)
            }
            // Left-align the bytes; the low bytes become the null terminator.
            result := shl(shl(3, sub(32, result)), mload(add(s, result)))
        }
    }

    /// @dev Returns a lowercased copy of the string.
    /// WARNING! This function is only compatible with 7-bit ASCII strings.
    function lower(string memory subject) internal pure returns (string memory result) {
        result = toCase(subject, false);
    }

    /// @dev Returns an UPPERCASED copy of the string.
    /// WARNING! This function is only compatible with 7-bit ASCII strings.
    function upper(string memory subject) internal pure returns (string memory result) {
        result = toCase(subject, true);
    }

    /// @dev Escapes the string to be used within HTML tags.
    function escapeHTML(string memory s) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(0x40)
            let end := add(s, mload(s))
            let o := add(result, 0x20)
            // Store the bytes of the packed offsets and strides into the scratch space.
            // `packed = (stride << 5) | offset`. Max offset is 20. Max stride is 6.
            mstore(0x1f, 0x900094)
            mstore(0x08, 0xc0000000a6ab)
            // Store "&quot;&amp;&#39;&lt;&gt;" into the scratch space.
            mstore(0x00, shl(64, 0x2671756f743b26616d703b262333393b266c743b2667743b))
            for { } iszero(eq(s, end)) { } {
                s := add(s, 1)
                let c := and(mload(s), 0xff)
                // Not in `["\"","'","&","<",">"]`.
                if iszero(and(shl(c, 1), 0x500000c400000000)) {
                    mstore8(o, c)
                    o := add(o, 1)
                    continue
                }
                let t := shr(248, mload(c))
                mstore(o, mload(and(t, 0x1f)))
                o := add(o, shr(5, t))
            }
            mstore(o, 0) // Zeroize the slot after the string.
            mstore(result, sub(o, add(result, 0x20))) // Store the length.
            mstore(0x40, add(o, 0x20)) // Allocate memory.
        }
    }

    /// @dev Escapes the string to be used within double-quotes in a JSON.
    /// If `addDoubleQuotes` is true, the result will be enclosed in double-quotes.
    function escapeJSON(string memory s, bool addDoubleQuotes) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(0x40)
            let o := add(result, 0x20)
            if addDoubleQuotes {
                mstore8(o, 34)
                o := add(1, o)
            }
            // Store "\\u0000" in scratch space.
            // Store "0123456789abcdef" in scratch space.
            // Also, store `{0x08:"b", 0x09:"t", 0x0a:"n", 0x0c:"f", 0x0d:"r"}`.
            // into the scratch space.
            mstore(0x15, 0x5c75303030303031323334353637383961626364656662746e006672)
            // Bitmask for detecting `["\"","\\"]`.
            let e := or(shl(0x22, 1), shl(0x5c, 1))
            for { let end := add(s, mload(s)) } iszero(eq(s, end)) { } {
                s := add(s, 1)
                let c := and(mload(s), 0xff)
                if iszero(lt(c, 0x20)) {
                    if iszero(and(shl(c, 1), e)) {
                        // Not in `["\"","\\"]`.
                        mstore8(o, c)
                        o := add(o, 1)
                        continue
                    }
                    mstore8(o, 0x5c) // "\\".
                    mstore8(add(o, 1), c)
                    o := add(o, 2)
                    continue
                }
                if iszero(and(shl(c, 1), 0x3700)) {
                    // Not in `["\b","\t","\n","\f","\r"]`.
                    mstore8(0x1d, mload(shr(4, c))) // Hex value.
                    mstore8(0x1e, mload(and(c, 15))) // Hex value.
                    mstore(o, mload(0x19)) // "\\u00XX".
                    o := add(o, 6)
                    continue
                }
                mstore8(o, 0x5c) // "\\".
                mstore8(add(o, 1), mload(add(c, 8)))
                o := add(o, 2)
            }
            if addDoubleQuotes {
                mstore8(o, 34)
                o := add(1, o)
            }
            mstore(o, 0) // Zeroize the slot after the string.
            mstore(result, sub(o, add(result, 0x20))) // Store the length.
            mstore(0x40, add(o, 0x20)) // Allocate memory.
        }
    }

    /// @dev Escapes the string to be used within double-quotes in a JSON.
    function escapeJSON(string memory s) internal pure returns (string memory result) {
        result = escapeJSON(s, false);
    }

    /// @dev Encodes `s` so that it can be safely used in a URI,
    /// just like `encodeURIComponent` in JavaScript.
    /// See: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent
    /// See: https://datatracker.ietf.org/doc/html/rfc2396
    /// See: https://datatracker.ietf.org/doc/html/rfc3986
    function encodeURIComponent(string memory s) internal pure returns (string memory result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := mload(0x40)
            // Store "0123456789ABCDEF" in scratch space.
            // Uppercased to be consistent with JavaScript's implementation.
            mstore(0x0f, 0x30313233343536373839414243444546)
            let o := add(result, 0x20)
            for { let end := add(s, mload(s)) } iszero(eq(s, end)) { } {
                s := add(s, 1)
                let c := and(mload(s), 0xff)
                // If not in `[A-Za-z0-9-_.!~*'()]` (the unreserved set), percent-encode.
                if iszero(and(1, shr(c, 0x47fffffe87fffffe03ff678200000000))) {
                    mstore8(o, 0x25) // '%'.
                    mstore8(add(o, 1), mload(and(shr(4, c), 15)))
                    mstore8(add(o, 2), mload(and(c, 15)))
                    o := add(o, 3)
                    continue
                }
                mstore8(o, c)
                o := add(o, 1)
            }
            mstore(result, sub(o, add(result, 0x20))) // Store the length.
            mstore(o, 0) // Zeroize the slot after the string.
            mstore(0x40, add(o, 0x20)) // Allocate memory.
        }
    }

    /// @dev Returns whether `a` equals `b`.
    function eq(string memory a, string memory b) internal pure returns (bool result) {
        /// @solidity memory-safe-assembly
        assembly {
            result := eq(keccak256(add(a, 0x20), mload(a)), keccak256(add(b, 0x20), mload(b)))
        }
    }

    /// @dev Returns whether `a` equals `b`, where `b` is a null-terminated small string.
    function eqs(string memory a, bytes32 b) internal pure returns (bool result) {
        /// @solidity memory-safe-assembly
        assembly {
            // These should be evaluated on compile time, as far as possible.
            let m := not(shl(7, div(not(iszero(b)), 255))) // `0x7f7f ...`.
            let x := not(or(m, or(b, add(m, and(b, m)))))
            let r := shl(7, iszero(iszero(shr(128, x))))
            r := or(r, shl(6, iszero(iszero(shr(64, shr(r, x))))))
            r := or(r, shl(5, lt(0xffffffff, shr(r, x))))
            r := or(r, shl(4, lt(0xffff, shr(r, x))))
            r := or(r, shl(3, lt(0xff, shr(r, x))))
            // forgefmt: disable-next-item
            result := gt(eq(mload(a), add(iszero(x), xor(31, shr(3, r)))),
                xor(shr(add(8, r), b), shr(add(8, r), mload(add(a, 0x20)))))
        }
    }

    /// @dev Returns 0 if `a == b`, -1 if `a < b`, +1 if `a > b`.
    /// If `a` == b[:a.length]`, and `a.length < b.length`, returns -1.
    function cmp(string memory a, string memory b) internal pure returns (int256) {
        return LibBytes.cmp(bytes(a), bytes(b));
    }

    /// @dev Packs a single string with its length into a single word.
    /// Returns `bytes32(0)` if the length is zero or greater than 31.
+ function packOne(string memory a) internal pure returns (bytes32 result) { + /// @solidity memory-safe-assembly + assembly { + // We don't need to zero right pad the string, + // since this is our own custom non-standard packing scheme. + result := + mul( + // Load the length and the bytes. + mload(add(a, 0x1f)), + // `length != 0 && length < 32`. Abuses underflow. + // Assumes that the length is valid and within the block gas limit. + lt(sub(mload(a), 1), 0x1f) + ) + } + } + + /// @dev Unpacks a string packed using {packOne}. + /// Returns the empty string if `packed` is `bytes32(0)`. + /// If `packed` is not an output of {packOne}, the output behavior is undefined. + function unpackOne(bytes32 packed) internal pure returns (string memory result) { + /// @solidity memory-safe-assembly + assembly { + result := mload(0x40) // Grab the free memory pointer. + mstore(0x40, add(result, 0x40)) // Allocate 2 words (1 for the length, 1 for the bytes). + mstore(result, 0) // Zeroize the length slot. + mstore(add(result, 0x1f), packed) // Store the length and bytes. + mstore(add(add(result, 0x20), mload(result)), 0) // Right pad with zeroes. + } + } + + /// @dev Packs two strings with their lengths into a single word. + /// Returns `bytes32(0)` if combined length is zero or greater than 30. + function packTwo(string memory a, string memory b) internal pure returns (bytes32 result) { + /// @solidity memory-safe-assembly + assembly { + let aLen := mload(a) + // We don't need to zero right pad the strings, + // since this is our own custom non-standard packing scheme. + result := + mul( + or( // Load the length and the bytes of `a` and `b`. + shl(shl(3, sub(0x1f, aLen)), mload(add(a, aLen))), mload(sub(add(b, 0x1e), aLen))), + // `totalLen != 0 && totalLen < 31`. Abuses underflow. + // Assumes that the lengths are valid and within the block gas limit. + lt(sub(add(aLen, mload(b)), 1), 0x1e) + ) + } + } + + /// @dev Unpacks strings packed using {packTwo}. 
+ /// Returns the empty strings if `packed` is `bytes32(0)`. + /// If `packed` is not an output of {packTwo}, the output behavior is undefined. + function unpackTwo(bytes32 packed) internal pure returns (string memory resultA, string memory resultB) { + /// @solidity memory-safe-assembly + assembly { + resultA := mload(0x40) // Grab the free memory pointer. + resultB := add(resultA, 0x40) + // Allocate 2 words for each string (1 for the length, 1 for the byte). Total 4 words. + mstore(0x40, add(resultB, 0x40)) + // Zeroize the length slots. + mstore(resultA, 0) + mstore(resultB, 0) + // Store the lengths and bytes. + mstore(add(resultA, 0x1f), packed) + mstore(add(resultB, 0x1f), mload(add(add(resultA, 0x20), mload(resultA)))) + // Right pad with zeroes. + mstore(add(add(resultA, 0x20), mload(resultA)), 0) + mstore(add(add(resultB, 0x20), mload(resultB)), 0) + } + } + + /// @dev Directly returns `a` without copying. + function directReturn(string memory a) internal pure { + /// @solidity memory-safe-assembly + assembly { + // Assumes that the string does not start from the scratch space. + let retStart := sub(a, 0x20) + let retUnpaddedSize := add(mload(a), 0x40) + // Right pad with zeroes. Just in case the string is produced + // by a method that doesn't zero right pad. + mstore(add(retStart, retUnpaddedSize), 0) + mstore(retStart, 0x20) // Store the return offset. + // End the transaction, returning the string. 
+ return(retStart, and(not(0x1f), add(0x1f, retUnpaddedSize))) + } + } +} diff --git a/crates/sdk/contracts/test/helpers/MockDeps.sol b/crates/sdk/contracts/test/helpers/MockDeps.sol new file mode 100644 index 0000000000..122965348d --- /dev/null +++ b/crates/sdk/contracts/test/helpers/MockDeps.sol @@ -0,0 +1,12 @@ +interface IOpenVmHalo2Verifier { + function verify(bytes calldata publicValues, bytes calldata proofData, bytes32 appExeCommit, bytes32 appVmCommit) + external + view; +} + +contract Halo2Verifier { + /// Mock verifier always reverts + fallback(bytes calldata) external returns (bytes memory) { + revert("Verification failed"); + } +} diff --git a/crates/sdk/examples/sdk_app.rs b/crates/sdk/examples/sdk_app.rs index 9514fb6485..fb78a01a3a 100644 --- a/crates/sdk/examples/sdk_app.rs +++ b/crates/sdk/examples/sdk_app.rs @@ -9,7 +9,7 @@ use openvm_sdk::{ prover::AppProver, Sdk, StdIn, }; -use openvm_stark_sdk::config::FriParameters; +use openvm_stark_sdk::config::{baby_bear_poseidon2::BabyBearPoseidon2Engine, FriParameters}; use openvm_transpiler::elf::Elf; use serde::{Deserialize, Serialize}; @@ -90,8 +90,11 @@ fn main() -> Result<(), Box> { // 9a. Generate a proof let proof = sdk.generate_app_proof(app_pk.clone(), app_committed_exe.clone(), stdin.clone())?; // 9b. Generate a proof with an AppProver with custom fields - let app_prover = AppProver::new(app_pk.app_vm_pk.clone(), app_committed_exe.clone()) - .with_program_name("test_program"); + let app_prover = AppProver::<_, BabyBearPoseidon2Engine>::new( + app_pk.app_vm_pk.clone(), + app_committed_exe.clone(), + ) + .with_program_name("test_program"); let proof = app_prover.generate_app_proof(stdin.clone()); // ANCHOR_END: proof_generation diff --git a/crates/sdk/examples/sdk_evm.rs b/crates/sdk/examples/sdk_evm.rs index a6940ca515..5df7af44db 100644 --- a/crates/sdk/examples/sdk_evm.rs +++ b/crates/sdk/examples/sdk_evm.rs @@ -96,7 +96,7 @@ fn main() -> Result<(), Box> { )?; // 9. 
Generate the SNARK verifier smart contract - let verifier = sdk.generate_snark_verifier_contract(&halo2_params_reader, &agg_pk)?; + let verifier = sdk.generate_halo2_verifier_solidity(&halo2_params_reader, &agg_pk)?; // 10. Generate an EVM proof let proof = sdk.generate_evm_proof( @@ -108,7 +108,7 @@ fn main() -> Result<(), Box> { )?; // 11. Verify the EVM proof - sdk.verify_evm_proof(&verifier, &proof)?; + sdk.verify_evm_halo2_proof(&verifier, &proof)?; // ANCHOR_END: evm_verification Ok(()) diff --git a/crates/sdk/guest/src/main.rs b/crates/sdk/guest/src/main.rs index b8568d74a5..bc6d94cda8 100644 --- a/crates/sdk/guest/src/main.rs +++ b/crates/sdk/guest/src/main.rs @@ -4,7 +4,7 @@ openvm::entry!(main); pub fn main() { - let n = core::hint::black_box(1 << 10); + let n = core::hint::black_box(1 << 3); let mut a: u32 = 0; let mut b: u32 = 1; for _ in 1..n { @@ -15,4 +15,7 @@ pub fn main() { if a == 0 { panic!(); } + + openvm::io::reveal_u32(a, 0); + openvm::io::reveal_u32(b, 1); } diff --git a/crates/sdk/src/codec.rs b/crates/sdk/src/codec.rs index 50ccd75271..5bc7a525f6 100644 --- a/crates/sdk/src/codec.rs +++ b/crates/sdk/src/codec.rs @@ -7,8 +7,10 @@ use openvm_continuations::verifier::root::types::RootVmVerifierInput; use openvm_native_compiler::ir::DIGEST_SIZE; use openvm_native_recursion::hints::{InnerBatchOpening, InnerFriProof, InnerQueryProof}; use openvm_stark_backend::{ - config::{Com, PcsProof}, - interaction::{fri_log_up::FriLogUpPartialProof, RapPhaseSeqKind}, + config::{Com, PcsProof, RapPhaseSeqPartialProof}, + interaction::{ + fri_log_up::FriLogUpPartialProof, gkr_log_up::GkrLogUpPartialProof, RapPhaseSeqKind, + }, p3_field::{ extension::BinomialExtensionField, FieldAlgebra, FieldExtensionAlgebra, PrimeField32, }, @@ -115,8 +117,8 @@ impl Encode for Proof { // Helper function to encode OpeningProof // ``` // pub struct OpeningProof { -// pub proof: PcsProof, -// pub values: OpenedValues, +// pub proof: PcsProof, +// pub values: OpenedValues, // } 
// ``` fn encode_opening_proof( @@ -147,6 +149,11 @@ fn encode_opened_values( for phase in &opened_values.after_challenge { encode_slice(phase, writer)?; } + todo!(); + // opened_values.extra_after_challenge.len().encode(writer)?; + // for phase in &opened_values.extra_after_challenge { + // encode_slice(phase, writer)?; + // } opened_values.quotient.len().encode(writer)?; for per_air in &opened_values.quotient { per_air.len().encode(writer)?; @@ -226,7 +233,6 @@ impl Encode for InnerQueryProof { /// pub opening_proof: Vec<[F; DIGEST_SIZE]>, /// } /// ``` - /// // @dev [jpw]: We prefer to keep the implementation all in one function // without `impl Encode` on subtypes because it obfuscates what the overall // struct consists of. @@ -261,6 +267,12 @@ impl Encode for Option> { } } +impl Encode for Option, F>> { + fn encode(&self, writer: &mut W) -> Result<()> { + todo!() + } +} + impl Encode for Challenge { fn encode(&self, writer: &mut W) -> Result<()> { let base_slice: &[F] = self.as_base_slice(); @@ -278,7 +290,8 @@ fn encode_commitments(commitments: &[Com], writer: &mut W) -> Resu encode_slice(&coms, writer) } -// Can't implement Encode on Com because Rust complains about associated trait types when you don't own the trait (in this case SC) +// Can't implement Encode on Com because Rust complains about associated trait types when you +// don't own the trait (in this case SC) impl Encode for [F; DIGEST_SIZE] { fn encode(&self, writer: &mut W) -> Result<()> { for val in self { @@ -392,7 +405,7 @@ impl Decode for Proof { } // Decode logup witness - let rap_phase_seq_proof = Option::>::decode(reader)?; + let rap_phase_seq_proof = Option::>::decode(reader)?; Ok(Proof { commitments, @@ -443,6 +456,13 @@ fn decode_opened_values(reader: &mut R) -> Result(reader: &mut R) -> Result> { } } +impl Decode for Option, F>> { + fn decode(reader: &mut R) -> Result { + todo!() + } +} + impl Decode for Challenge { fn decode(reader: &mut R) -> Result { // For a 
BinomialExtensionField, we need to read 4 F elements diff --git a/crates/sdk/src/commit.rs b/crates/sdk/src/commit.rs index 2e396b2763..7045d96443 100644 --- a/crates/sdk/src/commit.rs +++ b/crates/sdk/src/commit.rs @@ -35,8 +35,8 @@ pub struct AppExecutionCommit { } impl AppExecutionCommit { - /// Users should use this function to compute `AppExecutionCommit` and check it against the final - /// proof. + /// Users should use this function to compute `AppExecutionCommit` and check it against the + /// final proof. pub fn compute>( app_vm_config: &VC, app_exe: &NonRootCommittedExe, diff --git a/crates/sdk/src/config/mod.rs b/crates/sdk/src/config/mod.rs index 8d4525338d..035e3709ef 100644 --- a/crates/sdk/src/config/mod.rs +++ b/crates/sdk/src/config/mod.rs @@ -1,3 +1,4 @@ +use clap::Args; use openvm_circuit::arch::instructions::program::DEFAULT_MAX_NUM_PUBLIC_VALUES; use openvm_continuations::verifier::{ common::types::VmVerifierPvs, internal::types::InternalVmVerifierPvs, @@ -15,6 +16,11 @@ pub const DEFAULT_LEAF_LOG_BLOWUP: usize = 1; pub const DEFAULT_INTERNAL_LOG_BLOWUP: usize = 2; pub const DEFAULT_ROOT_LOG_BLOWUP: usize = 3; +// Aggregation Tree Defaults +const DEFAULT_NUM_CHILDREN_LEAF: usize = 1; +const DEFAULT_NUM_CHILDREN_INTERNAL: usize = 3; +const DEFAULT_MAX_INTERNAL_WRAPPER_LAYERS: usize = 4; + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AppConfig { #[serde(default)] @@ -59,6 +65,23 @@ pub struct Halo2Config { pub profiling: bool, } +#[derive(Clone, Copy, Debug, Serialize, Deserialize, Args)] +pub struct AggregationTreeConfig { + /// Each leaf verifier circuit will aggregate this many App VM proofs. + #[arg(long, default_value_t = DEFAULT_NUM_CHILDREN_LEAF)] + pub num_children_leaf: usize, + /// Each internal verifier circuit will aggregate this many proofs, + /// where each proof may be of either leaf or internal verifier (self) circuit. 
+ #[arg(long, default_value_t = DEFAULT_NUM_CHILDREN_INTERNAL)] + pub num_children_internal: usize, + /// Safety threshold: how many times to do 1-to-1 aggregation of the "last" internal + /// verifier proof before it is small enough for the root verifier circuit. + /// Note: almost always no wrapping is needed. + #[arg(long, default_value_t = DEFAULT_MAX_INTERNAL_WRAPPER_LAYERS)] + pub max_internal_wrapper_layers: usize, + // root currently always has 1 child for now +} + impl AppConfig { pub fn new(app_fri_params: FriParameters, app_vm_config: VC) -> Self { Self { @@ -187,3 +210,13 @@ impl AggStarkConfig { config } } + +impl Default for AggregationTreeConfig { + fn default() -> Self { + Self { + num_children_leaf: DEFAULT_NUM_CHILDREN_LEAF, + num_children_internal: DEFAULT_NUM_CHILDREN_INTERNAL, + max_internal_wrapper_layers: DEFAULT_MAX_INTERNAL_WRAPPER_LAYERS, + } + } +} diff --git a/crates/sdk/src/fs.rs b/crates/sdk/src/fs.rs index bd4a230547..ea7136f558 100644 --- a/crates/sdk/src/fs.rs +++ b/crates/sdk/src/fs.rs @@ -1,22 +1,24 @@ use std::{ - fs::{create_dir_all, read, write, File}, + fs::{create_dir_all, read, read_to_string, write, File}, path::Path, }; use eyre::Result; use openvm_circuit::arch::{instructions::exe::VmExe, ContinuationVmProof, VmConfig}; use openvm_continuations::verifier::root::types::RootVmVerifierInput; -use openvm_native_recursion::halo2::wrapper::{EvmVerifier, EvmVerifierByteCode}; +use openvm_native_recursion::halo2::wrapper::EvmVerifierByteCode; use serde::{de::DeserializeOwned, Serialize}; use crate::{ codec::{Decode, Encode}, keygen::{AggProvingKey, AppProvingKey, AppVerifyingKey}, - types::EvmProof, + types::{EvmHalo2Verifier, EvmProof}, F, SC, }; -pub const EVM_VERIFIER_SOL_FILENAME: &str = "verifier.sol"; +pub const EVM_HALO2_VERIFIER_INTERFACE_NAME: &str = "IOpenVmHalo2Verifier.sol"; +pub const EVM_HALO2_VERIFIER_PARENT_NAME: &str = "Halo2Verifier.sol"; +pub const EVM_HALO2_VERIFIER_BASE_NAME: &str = "OpenVmHalo2Verifier.sol"; 
pub const EVM_VERIFIER_ARTIFACT_FILENAME: &str = "verifier.bytecode.json"; pub fn read_exe_from_file>(path: P) -> Result> { @@ -90,22 +92,68 @@ pub fn write_evm_proof_to_file>(proof: EvmProof, path: P) -> Resu Ok(()) } -pub fn read_evm_verifier_from_folder>(folder: P) -> Result { - let sol_code_path = folder.as_ref().join(EVM_VERIFIER_SOL_FILENAME); - let sol_code = std::fs::read_to_string(sol_code_path)?; +pub fn read_evm_halo2_verifier_from_folder>(folder: P) -> Result { + let halo2_verifier_code_path = folder.as_ref().join(EVM_HALO2_VERIFIER_PARENT_NAME); + let openvm_verifier_code_path = folder.as_ref().join(EVM_HALO2_VERIFIER_BASE_NAME); + let interface_path = folder + .as_ref() + .join("interfaces") + .join(EVM_HALO2_VERIFIER_INTERFACE_NAME); + let halo2_verifier_code = read_to_string(halo2_verifier_code_path)?; + let openvm_verifier_code = read_to_string(openvm_verifier_code_path)?; + let interface = read_to_string(interface_path)?; + let artifact_path = folder.as_ref().join(EVM_VERIFIER_ARTIFACT_FILENAME); let artifact: EvmVerifierByteCode = serde_json::from_reader(File::open(artifact_path)?)?; - Ok(EvmVerifier { sol_code, artifact }) -} -pub fn write_evm_verifier_to_folder>( - verifier: EvmVerifier, + Ok(EvmHalo2Verifier { + halo2_verifier_code, + openvm_verifier_code, + openvm_verifier_interface: interface, + artifact, + }) +} + +/// Writes three Solidity contracts into the following folder structure: +/// +/// ```text +/// halo2/ +/// ├── interfaces/ +/// │ └── IOpenVmHalo2Verifier.sol +/// ├── OpenVmHalo2Verifier.sol +/// └── Halo2Verifier.sol +/// ``` +/// +/// If the relevant directories do not exist, they will be created. 
+pub fn write_evm_halo2_verifier_to_folder>( + verifier: EvmHalo2Verifier, folder: P, ) -> Result<()> { - let sol_code_path = folder.as_ref().join(EVM_VERIFIER_SOL_FILENAME); - std::fs::write(sol_code_path, verifier.sol_code)?; - let artifact_path = folder.as_ref().join(EVM_VERIFIER_ARTIFACT_FILENAME); + let folder = folder.as_ref(); + if !folder.exists() { + create_dir_all(folder)?; // Make sure directories exist + } + + let halo2_verifier_code_path = folder.join(EVM_HALO2_VERIFIER_PARENT_NAME); + let openvm_verifier_code_path = folder.join(EVM_HALO2_VERIFIER_BASE_NAME); + let interface_path = folder + .join("interfaces") + .join(EVM_HALO2_VERIFIER_INTERFACE_NAME); + + if let Some(parent) = interface_path.parent() { + create_dir_all(parent)?; + } + + write(halo2_verifier_code_path, verifier.halo2_verifier_code) + .expect("Failed to write halo2 verifier code"); + write(openvm_verifier_code_path, verifier.openvm_verifier_code) + .expect("Failed to write openvm halo2 verifier code"); + write(interface_path, verifier.openvm_verifier_interface) + .expect("Failed to write openvm halo2 verifier interface"); + + let artifact_path = folder.join(EVM_VERIFIER_ARTIFACT_FILENAME); serde_json::to_writer(File::create(artifact_path)?, &verifier.artifact)?; + Ok(()) } @@ -118,7 +166,7 @@ pub fn write_object_to_file>(path: P, data: T) -> R } pub(crate) fn read_from_file_bitcode>(path: P) -> Result { - let data = std::fs::read(path)?; + let data = read(path)?; let ret = bitcode::deserialize(&data)?; Ok(ret) } diff --git a/crates/sdk/src/keygen/dummy.rs b/crates/sdk/src/keygen/dummy.rs index d025411d81..44b66454b9 100644 --- a/crates/sdk/src/keygen/dummy.rs +++ b/crates/sdk/src/keygen/dummy.rs @@ -63,9 +63,9 @@ pub(super) fn compute_root_proof_heights( .into_iter() .map(next_power_of_two_or_zero) .collect(); - let mut internal_heights = res.internal_heights; - internal_heights.round_to_next_power_of_two_or_zero(); - (air_heights, internal_heights) + let mut vm_heights = 
res.vm_heights; + vm_heights.round_to_next_power_of_two_or_zero(); + (air_heights, vm_heights) } pub(super) fn dummy_internal_proof( @@ -177,7 +177,8 @@ where let overridden_heights = if let Some(overridden_heights) = overridden_heights { overridden_heights } else { - // We first execute once to get the trace heights from dummy_exe, then pad to powers of 2 (forcing trace height 0 to 1) + // We first execute once to get the trace heights from dummy_exe, then pad to powers of 2 + // (forcing trace height 0 to 1) let executor = VmExecutor::new(app_vm_pk.vm_config.clone()); let mut results = executor .execute_segments(dummy_exe.exe.clone(), vec![]) @@ -186,9 +187,9 @@ where assert_eq!(results.len(), 1, "dummy exe should have only 1 segment"); let mut result = results.pop().unwrap(); result.chip_complex.finalize_memory(); - let mut internal_heights = result.chip_complex.get_internal_trace_heights(); - internal_heights.round_to_next_power_of_two(); - internal_heights + let mut vm_heights = result.chip_complex.get_internal_trace_heights(); + vm_heights.round_to_next_power_of_two(); + vm_heights }; // For the dummy proof, we must override the trace heights. let app_prover = diff --git a/crates/sdk/src/keygen/mod.rs b/crates/sdk/src/keygen/mod.rs index 07265dd2d8..bbd6ec269e 100644 --- a/crates/sdk/src/keygen/mod.rs +++ b/crates/sdk/src/keygen/mod.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use derivative::Derivative; use dummy::{compute_root_proof_heights, dummy_internal_proof_riscv_app_vm}; use openvm_circuit::{ - arch::{VirtualMachine, VmConfig}, + arch::{VirtualMachine, VmComplexTraceHeights, VmConfig}, system::{memory::dimensions::MemoryDimensions, program::trace::VmCommittedExe}, }; use openvm_continuations::{ @@ -83,7 +83,8 @@ pub struct AggStarkProvingKey { pub struct Halo2ProvingKey { /// Static verifier to verify a stark proof of the root verifier. 
pub verifier: Halo2VerifierProvingKey, - /// Wrapper circuit to verify static verifier and reduce the verification costs in the final proof. + /// Wrapper circuit to verify static verifier and reduce the verification costs in the final + /// proof. pub wrapper: Halo2WrapperProvingKey, /// Whether to collect detailed profiling metrics pub profiling: bool, @@ -236,7 +237,8 @@ fn check_recursive_verifier_size( if fri_reduced_opening_trace_height > (1 << (Val::::TWO_ADICITY - next_log_blowup)) { tracing::warn!("recursive verifier size may be too large; FriReducedOpening height ({fri_reduced_opening_trace_height}) > {}", 1 << (Val::::TWO_ADICITY - next_log_blowup)); } - // Second check: static check for log up soundness constraints using FriReducedOpening trace height as proxy + // Second check: static check for log up soundness constraints using FriReducedOpening trace + // height as proxy if fri_reduced_opening_trace_height as u32 >= Val::::ORDER_U32 / 200 { tracing::warn!( "recursive verifier size may violate log up soundness constraints; {} > {}", @@ -333,7 +335,7 @@ impl AggStarkProvingKey { let mut vm_pk = vm.keygen(); assert!(vm_pk.max_constraint_degree <= config.root_fri_params.max_constraint_degree()); - let (air_heights, _internal_heights) = compute_root_proof_heights( + let (air_heights, vm_heights) = compute_root_proof_heights( root_vm_config.clone(), root_committed_exe.exe.clone(), &internal_proof, @@ -349,6 +351,7 @@ impl AggStarkProvingKey { }), root_committed_exe, air_heights, + vm_heights, } }; ( @@ -392,9 +395,8 @@ pub struct RootVerifierProvingKey { pub root_committed_exe: Arc>, /// The constant trace heights, ordered by AIR ID. pub air_heights: Vec, - // The following is currently not used: - // The constant trace heights, ordered according to an internal ordering determined by the `NativeConfig`. - // pub internal_heights: VmComplexTraceHeights, + /// The constant trace heights in a semantic way for VM. 
+ pub vm_heights: VmComplexTraceHeights, } impl RootVerifierProvingKey { diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 7f0640bd2b..1af33d86fc 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -1,9 +1,10 @@ use std::{fs::read, marker::PhantomData, path::Path, sync::Arc}; -use commit::commit_app_exe; -use config::AppConfig; +#[cfg(feature = "evm-verify")] +use alloy_primitives::{Bytes, FixedBytes}; +#[cfg(feature = "evm-verify")] +use alloy_sol_types::{sol, SolCall, SolValue}; use eyre::Result; -use keygen::{AppProvingKey, AppVerifyingKey}; use openvm_build::{ build_guest_package, find_unique_executable, get_package, GuestOptions, TargetFilter, }; @@ -23,11 +24,7 @@ pub use openvm_continuations::{ static_verifier::{DefaultStaticVerifierPvHandler, StaticVerifierPvHandler}, RootSC, C, F, SC, }; -use openvm_native_recursion::halo2::{ - utils::Halo2ParamsReader, - wrapper::{EvmVerifier, Halo2WrapperProvingKey}, - RawEvmProof, -}; +use openvm_native_recursion::halo2::utils::Halo2ParamsReader; use openvm_stark_backend::proof::Proof; use openvm_stark_sdk::{ config::{baby_bear_poseidon2::BabyBearPoseidon2Engine, FriParameters}, @@ -40,12 +37,17 @@ use openvm_transpiler::{ transpiler::{Transpiler, TranspilerError}, FromElf, }; +#[cfg(feature = "evm-verify")] +use snark_verifier_sdk::{evm::gen_evm_verifier_sol_code, halo2::aggregation::AggregationCircuit}; use crate::{ - config::AggConfig, - keygen::{AggProvingKey, AggStarkProvingKey}, - prover::{AppProver, ContinuationProver, StarkProver}, + commit::commit_app_exe, + config::{AggConfig, AggregationTreeConfig, AppConfig}, + keygen::{AggProvingKey, AggStarkProvingKey, AppProvingKey, AppVerifyingKey}, + prover::{AppProver, StarkProver}, }; +#[cfg(feature = "evm-prove")] +use crate::{prover::EvmHalo2Prover, types::EvmProof}; pub mod codec; pub mod commit; @@ -56,13 +58,22 @@ pub mod prover; mod stdin; pub use stdin::*; -use crate::types::EvmProof; - pub mod fs; pub mod types; pub type 
NonRootCommittedExe = VmCommittedExe; +pub const EVM_HALO2_VERIFIER_INTERFACE: &str = + include_str!("../contracts/src/IOpenVmHalo2Verifier.sol"); +pub const EVM_HALO2_VERIFIER_TEMPLATE: &str = + include_str!("../contracts/template/OpenVmHalo2Verifier.sol"); + +#[cfg(feature = "evm-verify")] +sol! { + IOpenVmHalo2Verifier, + concat!(env!("CARGO_MANIFEST_DIR"), "/contracts/abi/IOpenVmHalo2Verifier.json"), +} + /// The payload of a verified guest VM execution with user public values extracted and /// verified. pub struct VerifiedContinuationVmPayload { @@ -78,12 +89,14 @@ pub struct VerifiedContinuationVmPayload { } pub struct GenericSdk> { + agg_tree_config: AggregationTreeConfig, _phantom: PhantomData, } impl> Default for GenericSdk { fn default() -> Self { Self { + agg_tree_config: AggregationTreeConfig::default(), _phantom: PhantomData, } } @@ -96,6 +109,14 @@ impl> GenericSdk { Self::default() } + pub fn agg_tree_config(&self) -> &AggregationTreeConfig { + &self.agg_tree_config + } + + pub fn set_agg_tree_config(&mut self, agg_tree_config: AggregationTreeConfig) { + self.agg_tree_config = agg_tree_config; + } + pub fn build>( &self, guest_opts: GuestOptions, @@ -176,7 +197,7 @@ impl> GenericSdk { VC::Executor: Chip, VC::Periphery: Chip, { - let app_prover = AppProver::new(app_pk.app_vm_pk.clone(), app_committed_exe); + let app_prover = AppProver::::new(app_pk.app_vm_pk.clone(), app_committed_exe); let proof = app_prover.generate_app_proof(inputs); Ok(proof) } @@ -241,11 +262,13 @@ impl> GenericSdk { VC::Executor: Chip, VC::Periphery: Chip, { - let stark_prover = StarkProver::new(app_pk, app_exe, agg_stark_pk); + let stark_prover = + StarkProver::::new(app_pk, app_exe, agg_stark_pk, self.agg_tree_config); let proof = stark_prover.generate_root_verifier_input(inputs); Ok(proof) } + #[cfg(feature = "evm-prove")] pub fn generate_evm_proof>( &self, reader: &impl Halo2ParamsReader, @@ -258,29 +281,237 @@ impl> GenericSdk { VC::Executor: Chip, VC::Periphery: Chip, { - 
let e2e_prover = ContinuationProver::new(reader, app_pk, app_exe, agg_pk); + let e2e_prover = + EvmHalo2Prover::::new(reader, app_pk, app_exe, agg_pk, self.agg_tree_config); let proof = e2e_prover.generate_proof_for_evm(inputs); Ok(proof) } - pub fn generate_snark_verifier_contract( + #[cfg(feature = "evm-verify")] + pub fn generate_halo2_verifier_solidity( &self, reader: &impl Halo2ParamsReader, agg_pk: &AggProvingKey, - ) -> Result { + ) -> Result { + use std::{ + fs::{create_dir_all, write}, + process::Command, + }; + + use eyre::Context; + use openvm_native_recursion::halo2::wrapper::EvmVerifierByteCode; + use snark_verifier::halo2_base::halo2_proofs::poly::commitment::Params; + use snark_verifier_sdk::SHPLONK; + use tempfile::tempdir; + use types::EvmHalo2Verifier; + + use crate::fs::{ + EVM_HALO2_VERIFIER_BASE_NAME, EVM_HALO2_VERIFIER_INTERFACE_NAME, + EVM_HALO2_VERIFIER_PARENT_NAME, + }; + let params = reader.read_params(agg_pk.halo2_pk.wrapper.pinning.metadata.config_params.k); - let evm_verifier = agg_pk.halo2_pk.wrapper.generate_evm_verifier(¶ms); + let pinning = &agg_pk.halo2_pk.wrapper.pinning; + + assert_eq!( + pinning.metadata.config_params.k as u32, + params.k(), + "Provided params don't match circuit config" + ); + + let halo2_verifier_code = gen_evm_verifier_sol_code::( + ¶ms, + pinning.pk.get_vk(), + pinning.metadata.num_pvs.clone(), + ); + + let wrapper_pvs = agg_pk.halo2_pk.wrapper.pinning.metadata.num_pvs.clone(); + let pvs_length = match wrapper_pvs.first() { + // We subtract 14 to exclude the KZG accumulators and the app exe + // and vm commits. 
+ Some(v) => v + .checked_sub(14) + .expect("Unexpected number of static verifier wrapper public values"), + _ => panic!("Unexpected amount of instance columns in the static verifier wrapper"), + }; + + assert!( + pvs_length <= 8192, + "OpenVM Halo2 verifier contract does not support more than 8192 public values" + ); + + // Fill out the public values length and OpenVM version in the template + let openvm_verifier_code = EVM_HALO2_VERIFIER_TEMPLATE + .replace("{PUBLIC_VALUES_LENGTH}", &pvs_length.to_string()) + .replace("{OPENVM_VERSION}", env!("CARGO_PKG_VERSION")); + + // Create temp dir + let temp_dir = tempdir().wrap_err("Failed to create temp dir")?; + let temp_path = temp_dir.path(); + + // Make interfaces dir + let interfaces_path = temp_path.join("interfaces"); + create_dir_all(&interfaces_path)?; + + // Write the files to the temp dir. This is only for compilation + // purposes. + write( + interfaces_path.join(EVM_HALO2_VERIFIER_INTERFACE_NAME), + EVM_HALO2_VERIFIER_INTERFACE, + )?; + write( + temp_path.join(EVM_HALO2_VERIFIER_PARENT_NAME), + &halo2_verifier_code, + )?; + write( + temp_path.join(EVM_HALO2_VERIFIER_BASE_NAME), + &openvm_verifier_code, + )?; + + // Run solc from the temp dir + let output = Command::new("solc") + .current_dir(temp_path) + .arg("OpenVmHalo2Verifier.sol") + .arg("--no-optimize-yul") + .arg("--bin") + .arg("--optimize") + .arg("--optimize-runs") + .arg("100000") + .output()?; + + if !output.status.success() { + eyre::bail!( + "solc exited with status {}: {}", + output.status, + String::from_utf8_lossy(&output.stderr) + ); + } + + let bytecode = extract_binary( + &output.stdout, + "OpenVmHalo2Verifier.sol:OpenVmHalo2Verifier", + ); + + let evm_verifier = EvmHalo2Verifier { + halo2_verifier_code, + openvm_verifier_code, + openvm_verifier_interface: EVM_HALO2_VERIFIER_INTERFACE.to_string(), + artifact: EvmVerifierByteCode { + sol_compiler_version: "0.8.19".to_string(), + sol_compiler_options: "--no-optimize-yul --bin --optimize 
--optimize-runs 100000" + .to_string(), + bytecode, + }, + }; Ok(evm_verifier) } - pub fn verify_evm_proof( + #[cfg(feature = "evm-verify")] + /// Uses the `verify(..)` interface of the `OpenVmHalo2Verifier` contract. + pub fn verify_evm_halo2_proof( &self, - evm_verifier: &EvmVerifier, + openvm_verifier: &types::EvmHalo2Verifier, evm_proof: &EvmProof, ) -> Result { - let evm_proof: RawEvmProof = evm_proof.clone().try_into()?; - let gas_cost = Halo2WrapperProvingKey::evm_verify(evm_verifier, &evm_proof) - .map_err(|reason| eyre::eyre!("Sdk::verify_evm_proof: {reason:?}"))?; + use crate::types::NUM_BN254_ACCUMULATORS; + + let EvmProof { + accumulators, + proof, + user_public_values, + exe_commit, + leaf_commit, + } = evm_proof; + let mut exe_commit = *exe_commit; + let mut leaf_commit = *leaf_commit; + exe_commit.reverse(); + leaf_commit.reverse(); + + assert_eq!(accumulators.len(), NUM_BN254_ACCUMULATORS * 32); + let mut evm_accumulators: Vec = Vec::with_capacity(accumulators.len()); + accumulators + .chunks(32) + .for_each(|chunk| evm_accumulators.extend(chunk.iter().rev().cloned())); + + let mut proof_data = evm_accumulators; + proof_data.extend(proof); + + assert!( + user_public_values.len() % 32 == 0, + "User public values length must be a multiple of 32" + ); + + // Take the first byte of each 32 byte chunk, and pack them together + // into one payload. 
+ let user_public_values: Bytes = + user_public_values + .chunks(32) + .fold(Vec::::new().into(), |acc: Bytes, chunk| { + // We only care about the first byte, everything else should be 0-bytes + (acc, FixedBytes::<1>::from(*chunk.first().unwrap())) + .abi_encode_packed() + .into() + }); + + let calldata = IOpenVmHalo2Verifier::verifyCall { + publicValues: user_public_values.clone(), + proofData: proof_data.into(), + appExeCommit: exe_commit.into(), + appVmCommit: leaf_commit.into(), + } + .abi_encode(); + let deployment_code = openvm_verifier.artifact.bytecode.clone(); + + let gas_cost = snark_verifier::loader::evm::deploy_and_call(deployment_code, calldata) + .map_err(|reason| eyre::eyre!("Sdk::verify_openvm_evm_proof: {reason:?}"))?; + Ok(gas_cost) } } + +/// We will split the output by whitespace and look for the following +/// sequence: +/// [ +/// ... +/// "=======", +/// "OpenVmHalo2Verifier.sol:OpenVmHalo2Verifier", +/// "=======", +/// "Binary:" +/// "[compiled bytecode]" +/// ... +/// ] +/// +/// Once we find "OpenVmHalo2Verifier.sol:OpenVmHalo2Verifier," we can skip +/// to the appropriate offset to get the compiled bytecode. 
+#[cfg(feature = "evm-verify")] +fn extract_binary(output: &[u8], contract_name: &str) -> Vec { + let split = split_by_ascii_whitespace(output); + let contract_name_bytes = contract_name.as_bytes(); + + for i in 0..split.len().saturating_sub(3) { + if split[i] == contract_name_bytes { + return hex::decode(split[i + 3]).expect("Invalid hex in Binary"); + } + } + + panic!("Contract '{}' not found", contract_name); +} + +#[cfg(feature = "evm-verify")] +fn split_by_ascii_whitespace(bytes: &[u8]) -> Vec<&[u8]> { + let mut split = Vec::new(); + let mut start = None; + for (idx, byte) in bytes.iter().enumerate() { + if byte.is_ascii_whitespace() { + if let Some(start) = start.take() { + split.push(&bytes[start..idx]); + } + } else if start.is_none() { + start = Some(idx); + } + } + if let Some(last) = start { + split.push(&bytes[last..]); + } + split +} diff --git a/crates/sdk/src/prover/agg.rs b/crates/sdk/src/prover/agg.rs index bcf202f60d..42f0d85d58 100644 --- a/crates/sdk/src/prover/agg.rs +++ b/crates/sdk/src/prover/agg.rs @@ -7,12 +7,11 @@ use openvm_continuations::verifier::{ }; use openvm_native_circuit::NativeConfig; use openvm_native_recursion::hints::Hintable; -use openvm_stark_sdk::{ - config::baby_bear_poseidon2::BabyBearPoseidon2Engine, openvm_stark_backend::proof::Proof, -}; +use openvm_stark_sdk::{engine::StarkFriEngine, openvm_stark_backend::proof::Proof}; use tracing::info_span; use crate::{ + config::AggregationTreeConfig, keygen::AggStarkProvingKey, prover::{ vm::{local::VmLocalProver, SingleSegmentVmProver}, @@ -21,15 +20,11 @@ use crate::{ NonRootCommittedExe, RootSC, F, SC, }; -pub const DEFAULT_NUM_CHILDREN_LEAF: usize = 1; -const DEFAULT_NUM_CHILDREN_INTERNAL: usize = 2; -const DEFAULT_MAX_INTERNAL_WRAPPER_LAYERS: usize = 4; - -pub struct AggStarkProver { - leaf_prover: VmLocalProver, +pub struct AggStarkProver> { + leaf_prover: VmLocalProver, leaf_controller: LeafProvingController, - internal_prover: VmLocalProver, + internal_prover: 
VmLocalProver, root_prover: RootVerifierLocalProver, pub num_children_internal: usize, @@ -41,19 +36,18 @@ pub struct LeafProvingController { pub num_children: usize, } -impl AggStarkProver { +impl> AggStarkProver { pub fn new( agg_stark_pk: AggStarkProvingKey, leaf_committed_exe: Arc, + tree_config: AggregationTreeConfig, ) -> Self { - let leaf_prover = VmLocalProver::::new( - agg_stark_pk.leaf_vm_pk, - leaf_committed_exe, - ); + let leaf_prover = + VmLocalProver::::new(agg_stark_pk.leaf_vm_pk, leaf_committed_exe); let leaf_controller = LeafProvingController { - num_children: DEFAULT_NUM_CHILDREN_LEAF, + num_children: tree_config.num_children_leaf, }; - let internal_prover = VmLocalProver::::new( + let internal_prover = VmLocalProver::::new( agg_stark_pk.internal_vm_pk, agg_stark_pk.internal_committed_exe, ); @@ -63,8 +57,8 @@ impl AggStarkProver { leaf_controller, internal_prover, root_prover, - num_children_internal: DEFAULT_NUM_CHILDREN_INTERNAL, - max_internal_wrapper_layers: DEFAULT_MAX_INTERNAL_WRAPPER_LAYERS, + num_children_internal: tree_config.num_children_internal, + max_internal_wrapper_layers: tree_config.max_internal_wrapper_layers, } } @@ -184,9 +178,9 @@ impl LeafProvingController { self } - pub fn generate_proof( + pub fn generate_proof>( &self, - prover: &VmLocalProver, + prover: &VmLocalProver, app_proofs: &ContinuationVmProof, ) -> Vec> { info_span!("agg_layer", group = "leaf").in_scope(|| { diff --git a/crates/sdk/src/prover/app.rs b/crates/sdk/src/prover/app.rs index 6ed7d528b7..095351677e 100644 --- a/crates/sdk/src/prover/app.rs +++ b/crates/sdk/src/prover/app.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use getset::Getters; use openvm_circuit::arch::{ContinuationVmProof, VmConfig}; use openvm_stark_backend::{proof::Proof, Chip}; -use openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine; +use openvm_stark_sdk::engine::StarkFriEngine; use tracing::info_span; use super::vm::SingleSegmentVmProver; @@ -13,13 +13,13 @@ use crate::{ }; 
#[derive(Getters)] -pub struct AppProver { +pub struct AppProver> { pub program_name: Option, #[getset(get = "pub")] - app_prover: VmLocalProver, + app_prover: VmLocalProver, } -impl AppProver { +impl> AppProver { pub fn new( app_vm_pk: Arc>, app_committed_exe: Arc, @@ -29,10 +29,7 @@ impl AppProver { { Self { program_name: None, - app_prover: VmLocalProver::::new( - app_vm_pk, - app_committed_exe, - ), + app_prover: VmLocalProver::::new(app_vm_pk, app_committed_exe), } } pub fn set_program_name(&mut self, program_name: impl AsRef) -> &mut Self { diff --git a/crates/sdk/src/prover/mod.rs b/crates/sdk/src/prover/mod.rs index 5e4fee43ad..67ccfe1eb8 100644 --- a/crates/sdk/src/prover/mod.rs +++ b/crates/sdk/src/prover/mod.rs @@ -1,67 +1,78 @@ -use std::sync::Arc; - -use openvm_circuit::arch::VmConfig; -use openvm_stark_sdk::openvm_stark_backend::Chip; - -use crate::{keygen::AppProvingKey, stdin::StdIn, NonRootCommittedExe, F, SC}; - mod agg; -pub use agg::*; mod app; -pub use app::*; -use openvm_native_recursion::halo2::utils::Halo2ParamsReader; - +#[cfg(feature = "evm-prove")] mod halo2; -#[allow(unused_imports)] -pub use halo2::*; mod root; -pub use root::*; mod stark; pub mod vm; -#[allow(unused_imports)] +pub use agg::*; +pub use app::*; +#[cfg(feature = "evm-prove")] +pub use evm::*; +#[cfg(feature = "evm-prove")] +pub use halo2::*; +pub use root::*; pub use stark::*; -use crate::{keygen::AggProvingKey, prover::halo2::Halo2Prover, types::EvmProof}; +#[cfg(feature = "evm-prove")] +mod evm { + use std::sync::Arc; -pub struct ContinuationProver { - stark_prover: StarkProver, - halo2_prover: Halo2Prover, -} + use openvm_circuit::arch::VmConfig; + use openvm_native_recursion::halo2::utils::Halo2ParamsReader; + use openvm_stark_sdk::{engine::StarkFriEngine, openvm_stark_backend::Chip}; -impl ContinuationProver { - pub fn new( - reader: &impl Halo2ParamsReader, - app_pk: Arc>, - app_committed_exe: Arc, - agg_pk: AggProvingKey, - ) -> Self - where - VC: VmConfig, - { - 
let AggProvingKey { - agg_stark_pk, - halo2_pk, - } = agg_pk; - let stark_prover = StarkProver::new(app_pk, app_committed_exe, agg_stark_pk); - Self { - stark_prover, - halo2_prover: Halo2Prover::new(reader, halo2_pk), - } - } + use super::{Halo2Prover, StarkProver}; + use crate::{ + config::AggregationTreeConfig, + keygen::{AggProvingKey, AppProvingKey}, + stdin::StdIn, + types::EvmProof, + NonRootCommittedExe, F, SC, + }; - pub fn set_program_name(&mut self, program_name: impl AsRef) -> &mut Self { - self.stark_prover.set_program_name(program_name); - self + pub struct EvmHalo2Prover> { + pub stark_prover: StarkProver, + pub halo2_prover: Halo2Prover, } - pub fn generate_proof_for_evm(&self, input: StdIn) -> EvmProof - where - VC: VmConfig, - VC::Executor: Chip, - VC::Periphery: Chip, - { - let root_proof = self.stark_prover.generate_proof_for_outer_recursion(input); - self.halo2_prover.prove_for_evm(&root_proof) + impl> EvmHalo2Prover { + pub fn new( + reader: &impl Halo2ParamsReader, + app_pk: Arc>, + app_committed_exe: Arc, + agg_pk: AggProvingKey, + agg_tree_config: AggregationTreeConfig, + ) -> Self + where + VC: VmConfig, + { + let AggProvingKey { + agg_stark_pk, + halo2_pk, + } = agg_pk; + let stark_prover = + StarkProver::new(app_pk, app_committed_exe, agg_stark_pk, agg_tree_config); + Self { + stark_prover, + halo2_prover: Halo2Prover::new(reader, halo2_pk), + } + } + + pub fn set_program_name(&mut self, program_name: impl AsRef) -> &mut Self { + self.stark_prover.set_program_name(program_name); + self + } + + pub fn generate_proof_for_evm(&self, input: StdIn) -> EvmProof + where + VC: VmConfig, + VC::Executor: Chip, + VC::Periphery: Chip, + { + let root_proof = self.stark_prover.generate_proof_for_outer_recursion(input); + self.halo2_prover.prove_for_evm(&root_proof) + } } } diff --git a/crates/sdk/src/prover/root.rs b/crates/sdk/src/prover/root.rs index 87548c7461..6e69aa0f13 100644 --- a/crates/sdk/src/prover/root.rs +++ 
b/crates/sdk/src/prover/root.rs @@ -52,7 +52,8 @@ impl RootVerifierLocalProver { impl SingleSegmentVmProver for RootVerifierLocalProver { fn prove(&self, input: impl Into>) -> Proof { let input = input.into(); - let vm = SingleSegmentVmExecutor::new(self.vm_config().clone()); + let mut vm = SingleSegmentVmExecutor::new(self.vm_config().clone()); + vm.set_override_trace_heights(self.root_verifier_pk.vm_heights.clone()); let mut proof_input = vm .execute_and_generate(self.root_verifier_pk.root_committed_exe.clone(), input) .unwrap(); diff --git a/crates/sdk/src/prover/stark.rs b/crates/sdk/src/prover/stark.rs index 99d053b1f4..c95bdc0655 100644 --- a/crates/sdk/src/prover/stark.rs +++ b/crates/sdk/src/prover/stark.rs @@ -3,22 +3,25 @@ use std::sync::Arc; use openvm_circuit::arch::VmConfig; use openvm_continuations::verifier::root::types::RootVmVerifierInput; use openvm_stark_backend::{proof::Proof, Chip}; +use openvm_stark_sdk::engine::StarkFriEngine; use crate::{ + config::AggregationTreeConfig, keygen::{AggStarkProvingKey, AppProvingKey}, prover::{agg::AggStarkProver, app::AppProver}, NonRootCommittedExe, RootSC, StdIn, F, SC, }; -pub struct StarkProver { - app_prover: AppProver, - agg_prover: AggStarkProver, +pub struct StarkProver> { + pub app_prover: AppProver, + pub agg_prover: AggStarkProver, } -impl StarkProver { +impl> StarkProver { pub fn new( app_pk: Arc>, app_committed_exe: Arc, agg_stark_pk: AggStarkProvingKey, + agg_tree_config: AggregationTreeConfig, ) -> Self where VC: VmConfig, @@ -35,7 +38,11 @@ impl StarkProver { Self { app_prover: AppProver::new(app_pk.app_vm_pk.clone(), app_committed_exe), - agg_prover: AggStarkProver::new(agg_stark_pk, app_pk.leaf_committed_exe.clone()), + agg_prover: AggStarkProver::new( + agg_stark_pk, + app_pk.leaf_committed_exe.clone(), + agg_tree_config, + ), } } pub fn set_program_name(&mut self, program_name: impl AsRef) -> &mut Self { diff --git a/crates/sdk/src/types.rs b/crates/sdk/src/types.rs index 
f46302d106..c4c6abcfbe 100644 --- a/crates/sdk/src/types.rs +++ b/crates/sdk/src/types.rs @@ -1,5 +1,5 @@ use itertools::Itertools; -use openvm_native_recursion::halo2::{Fr, RawEvmProof}; +use openvm_native_recursion::halo2::{wrapper::EvmVerifierByteCode, Fr, RawEvmProof}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use thiserror::Error; @@ -7,15 +7,24 @@ use thiserror::Error; /// Number of bytes in a Bn254Fr. const BN254_BYTES: usize = 32; /// Number of Bn254Fr in `accumulators` field. -const NUM_BN254_ACCUMULATORS: usize = 12; +pub const NUM_BN254_ACCUMULATORS: usize = 12; /// Number of Bn254Fr in `proof` field for a circuit with only 1 advice column. const NUM_BN254_PROOF: usize = 43; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EvmHalo2Verifier { + pub halo2_verifier_code: String, + pub openvm_verifier_code: String, + pub openvm_verifier_interface: String, + pub artifact: EvmVerifierByteCode, +} + #[serde_as] -#[derive(Clone, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct EvmProof { #[serde_as(as = "serde_with::hex::Hex")] - /// Bn254Fr public values for accumulators in flatten little-endian bytes. Length is `NUM_BN254_ACCUMULATORS * BN254_BYTES`. + /// Bn254Fr public values for accumulators in flatten little-endian bytes. Length is + /// `NUM_BN254_ACCUMULATORS * BN254_BYTES`. pub accumulators: Vec, #[serde_as(as = "serde_with::hex::Hex")] /// 1 Bn254Fr public value for exe commit in little-endian bytes. @@ -27,7 +36,8 @@ pub struct EvmProof { /// Bn254Fr user public values in little-endian bytes. pub user_public_values: Vec, #[serde_as(as = "serde_with::hex::Hex")] - /// Bn254Fr proof in little-endian bytes. The circuit only has 1 advice column, so the proof is of length `NUM_BN254_PROOF * BN254_BYTES`. + /// Bn254Fr proof in little-endian bytes. The circuit only has 1 advice column, so the proof is + /// of length `NUM_BN254_PROOF * BN254_BYTES`. 
pub proof: Vec, } @@ -44,6 +54,7 @@ pub enum EvmProofConversionError { } impl EvmProof { + #[cfg(feature = "evm-prove")] /// Return bytes calldata to be passed to the verifier contract. pub fn verifier_calldata(&self) -> Vec { let evm_proof: RawEvmProof = self.clone().try_into().unwrap(); diff --git a/crates/sdk/tests/integration_test.rs b/crates/sdk/tests/integration_test.rs index 4d768b2aa1..708ba43bac 100644 --- a/crates/sdk/tests/integration_test.rs +++ b/crates/sdk/tests/integration_test.rs @@ -1,5 +1,6 @@ use std::{borrow::Borrow, path::PathBuf, sync::Arc}; +use eyre::Result; use openvm_build::GuestOptions; use openvm_circuit::{ arch::{ @@ -20,15 +21,24 @@ use openvm_continuations::{ use openvm_native_circuit::{Native, NativeConfig}; use openvm_native_compiler::{conversion::CompilerOptions, prelude::*}; use openvm_native_recursion::{ - config::outer::OuterConfig, halo2::utils::CacheHalo2ParamsReader, types::InnerConfig, + config::outer::OuterConfig, + halo2::{ + utils::{CacheHalo2ParamsReader, Halo2ParamsReader}, + wrapper::Halo2WrapperProvingKey, + RawEvmProof, + }, + types::InnerConfig, vars::StarkProofVariable, }; -use openvm_rv32im_transpiler::{Rv32ITranspilerExtension, Rv32MTranspilerExtension}; +use openvm_rv32im_transpiler::{ + Rv32ITranspilerExtension, Rv32IoTranspilerExtension, Rv32MTranspilerExtension, +}; use openvm_sdk::{ codec::{Decode, Encode}, commit::AppExecutionCommit, - config::{AggConfig, AggStarkConfig, AppConfig, Halo2Config, SdkVmConfig}, + config::{AggConfig, AggStarkConfig, AppConfig, Halo2Config, SdkSystemConfig, SdkVmConfig}, keygen::AppProvingKey, + types::{EvmHalo2Verifier, EvmProof}, DefaultStaticVerifierPvHandler, Sdk, StdIn, }; use openvm_stark_backend::{keygen::types::LinearConstraint, p3_matrix::Matrix}; @@ -43,6 +53,7 @@ use openvm_stark_sdk::{ p3_bn254_fr::Bn254Fr, }; use openvm_transpiler::transpiler::Transpiler; +use snark_verifier_sdk::evm::evm_verify; type SC = BabyBearPoseidon2Config; type C = InnerConfig; @@ -53,6 
+64,23 @@ const LEAF_LOG_BLOWUP: usize = 2; const INTERNAL_LOG_BLOWUP: usize = 3; const ROOT_LOG_BLOWUP: usize = 4; +/// `OpenVmHalo2Verifier` wraps the `snark-verifer` contract, meaning that +/// the default `fallback` interface can still be used. This function uses +/// the fallback interface as opposed to the `verify(..)` interface. +fn verify_evm_halo2_proof_with_fallback( + openvm_verifier: &EvmHalo2Verifier, + evm_proof: &EvmProof, +) -> Result { + let evm_proof: RawEvmProof = evm_proof.clone().try_into()?; + let gas_cost = evm_verify( + openvm_verifier.artifact.bytecode.clone(), + vec![evm_proof.instances.clone()], + evm_proof.proof.clone(), + ) + .map_err(|reason| eyre::eyre!("Sdk::verify_openvm_evm_proof: {reason:?}"))?; + Ok(gas_cost) +} + fn run_leaf_verifier>( leaf_vm: &SingleSegmentVmExecutor, leaf_committed_exe: Arc>, @@ -265,6 +293,7 @@ fn test_public_values_and_leaf_verification() { } } +#[cfg(feature = "evm-verify")] #[test] fn test_static_verifier_custom_pv_handler() { // Define custom public values handler and implement StaticVerifierPvHandler trait on it @@ -340,9 +369,12 @@ fn test_static_verifier_custom_pv_handler() { // Generate verifier contract println!("generate verifier contract"); - let evm_verifier = sdk - .generate_snark_verifier_contract(¶ms_reader, &agg_pk) - .unwrap(); + let params = + params_reader.read_params(agg_pk.halo2_pk.wrapper.pinning.metadata.config_params.k); + let evm_verifier = agg_pk + .halo2_pk + .wrapper + .generate_fallback_evm_verifier(¶ms); // Generate and verify proof println!("generate and verify proof"); @@ -355,15 +387,52 @@ fn test_static_verifier_custom_pv_handler() { StdIn::default(), ) .unwrap(); - assert!(sdk.verify_evm_proof(&evm_verifier, &evm_proof).is_ok()); + + let evm_proof: RawEvmProof = evm_proof + .clone() + .try_into() + .expect("failed to convert evm proof"); + Halo2WrapperProvingKey::evm_verify(&evm_verifier, &evm_proof).unwrap(); } +#[cfg(feature = "evm-verify")] #[test] -fn 
test_e2e_proof_generation_and_verification() { - let app_log_blowup = 1; - let app_config = small_test_app_config(app_log_blowup); +fn test_e2e_proof_generation_and_verification_with_pvs() { + let mut pkg_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).to_path_buf(); + pkg_dir.push("guest"); + + let vm_config = SdkVmConfig::builder() + .system(SdkSystemConfig { + config: SystemConfig::default() + .with_max_segment_len(200) + .with_continuations() + .with_public_values(NUM_PUB_VALUES), + }) + .rv32i(Default::default()) + .rv32m(Default::default()) + .io(Default::default()) + .native(Default::default()) + .build(); + let sdk = Sdk::new(); + let elf = sdk + .build(Default::default(), pkg_dir, &Default::default()) + .unwrap(); + let exe = sdk.transpile(elf, vm_config.transpiler()).unwrap(); + + let app_log_blowup = 1; + let app_fri_params = FriParameters::new_for_testing(app_log_blowup); + let leaf_fri_params = FriParameters::new_for_testing(LEAF_LOG_BLOWUP); + let mut app_config = + AppConfig::new_with_leaf_fri_params(app_fri_params, vm_config, leaf_fri_params); + app_config.compiler_options.enable_cycle_tracker = true; + + let app_committed_exe = sdk + .commit_app_exe(app_fri_params, exe) + .expect("failed to commit exe"); + let app_pk = sdk.app_keygen(app_config).unwrap(); + let params_reader = CacheHalo2ParamsReader::new_with_default_params_dir(); let agg_pk = sdk .agg_keygen( @@ -372,20 +441,24 @@ fn test_e2e_proof_generation_and_verification() { &DefaultStaticVerifierPvHandler, ) .unwrap(); + let evm_verifier = sdk - .generate_snark_verifier_contract(¶ms_reader, &agg_pk) + .generate_halo2_verifier_solidity(¶ms_reader, &agg_pk) .unwrap(); let evm_proof = sdk .generate_evm_proof( ¶ms_reader, Arc::new(app_pk), - app_committed_exe_for_test(app_log_blowup), + app_committed_exe, agg_pk, StdIn::default(), ) .unwrap(); - assert!(sdk.verify_evm_proof(&evm_verifier, &evm_proof).is_ok()); + + verify_evm_halo2_proof_with_fallback(&evm_verifier, &evm_proof).unwrap(); + 
sdk.verify_evm_halo2_proof(&evm_verifier, &evm_proof) + .unwrap(); } #[test] @@ -407,7 +480,8 @@ fn test_sdk_guest_build_and_transpile() { assert_eq!(one.instructions, two.instructions); let transpiler = Transpiler::::default() .with_extension(Rv32ITranspilerExtension) - .with_extension(Rv32MTranspilerExtension); + .with_extension(Rv32MTranspilerExtension) + .with_extension(Rv32IoTranspilerExtension); let _exe = sdk.transpile(one, transpiler).unwrap(); } @@ -418,10 +492,18 @@ fn test_inner_proof_codec_roundtrip() -> eyre::Result<()> { let mut pkg_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).to_path_buf(); pkg_dir.push("guest"); let elf = sdk.build(Default::default(), pkg_dir, &Default::default())?; + let vm_config = SdkVmConfig::builder() - .system(Default::default()) + .system(SdkSystemConfig { + config: SystemConfig::default() + .with_max_segment_len(200) + .with_continuations() + .with_public_values(NUM_PUB_VALUES), + }) .rv32i(Default::default()) .rv32m(Default::default()) + .io(Default::default()) + .native(Default::default()) .build(); assert!(vm_config.system.config.continuation_enabled); let exe = sdk.transpile(elf, vm_config.transpiler())?; diff --git a/crates/toolchain/build/Cargo.toml b/crates/toolchain/build/Cargo.toml index 1f2b7eb91e..8f99b0f180 100644 --- a/crates/toolchain/build/Cargo.toml +++ b/crates/toolchain/build/Cargo.toml @@ -13,7 +13,7 @@ openvm-platform = { workspace = true } serde.workspace = true serde_json.workspace = true eyre.workspace = true -cargo_metadata = "0.18" +cargo_metadata.workspace = true [dev-dependencies] diff --git a/crates/toolchain/build/src/lib.rs b/crates/toolchain/build/src/lib.rs index e4450e7d41..8b1c8e1112 100644 --- a/crates/toolchain/build/src/lib.rs +++ b/crates/toolchain/build/src/lib.rs @@ -103,7 +103,8 @@ pub fn current_package() -> Package { get_package(env::var("CARGO_MANIFEST_DIR").unwrap()) } -/// Reads the value of the environment variable `OPENVM_SKIP_BUILD` and returns true if it is set to 1. 
+/// Reads the value of the environment variable `OPENVM_SKIP_BUILD` and returns true if it is set to +/// 1. pub fn is_skip_build() -> bool { !get_env_var(SKIP_BUILD_ENV).is_empty() } @@ -269,6 +270,13 @@ pub fn build_guest_package( return Err(None); } + // Check if the required toolchain and rust-src component are installed, and if not, install + // them. This requires that `rustup` is installed. + if let Err(code) = ensure_toolchain_installed(RUSTUP_TOOLCHAIN_NAME, &["rust-src"]) { + eprintln!("rustup toolchain commands failed. Please ensure rustup is installed (https://www.rust-lang.org/tools/install)"); + return Err(Some(code)); + } + let target_dir = guest_opts .target_dir .clone() @@ -414,10 +422,78 @@ pub fn detect_toolchain(name: &str) { let stdout = String::from_utf8(result.stdout).unwrap(); if !stdout.lines().any(|line| line.trim().starts_with(name)) { eprintln!("The '{name}' toolchain could not be found."); - // eprintln!("To install the risc0 toolchain, use rzup."); - // eprintln!("For example:"); - // eprintln!(" curl -L https://risczero.com/install | bash"); - // eprintln!(" rzup install"); std::process::exit(-1); } } + +/// Ensures the required toolchain and components are installed. 
+fn ensure_toolchain_installed(toolchain: &str, components: &[&str]) -> Result<(), i32> { + // Check if toolchain is installed + let output = Command::new("rustup") + .args(["toolchain", "list"]) + .output() + .map_err(|e| { + tty_println(&format!("Failed to check toolchains: {}", e)); + e.raw_os_error().unwrap_or(1) + })?; + + let toolchain_installed = String::from_utf8_lossy(&output.stdout) + .lines() + .any(|line| line.trim().starts_with(toolchain)); + + // Install toolchain if missing + if !toolchain_installed { + tty_println(&format!("Installing required toolchain: {}", toolchain)); + let status = Command::new("rustup") + .args(["toolchain", "install", toolchain]) + .status() + .map_err(|e| { + tty_println(&format!("Failed to install toolchain: {}", e)); + e.raw_os_error().unwrap_or(1) + })?; + + if !status.success() { + tty_println(&format!("Failed to install toolchain {}", toolchain)); + return Err(status.code().unwrap_or(1)); + } + } + + // Check and install missing components + for component in components { + let output = Command::new("rustup") + .args(["component", "list", "--toolchain", toolchain]) + .output() + .map_err(|e| { + tty_println(&format!("Failed to check components: {}", e)); + e.raw_os_error().unwrap_or(1) + })?; + + let is_installed = String::from_utf8_lossy(&output.stdout) + .lines() + .any(|line| line.contains(component) && line.contains("(installed)")); + + if !is_installed { + tty_println(&format!( + "Installing component {} for toolchain {}", + component, toolchain + )); + let status = Command::new("rustup") + .args(["component", "add", component, "--toolchain", toolchain]) + .status() + .map_err(|e| { + tty_println(&format!("Failed to install component: {}", e)); + e.raw_os_error().unwrap_or(1) + })?; + + if !status.success() { + tty_println(&format!( + "Failed to install component {} for toolchain {}", + component, toolchain + )); + return Err(status.code().unwrap_or(1)); + } + } + } + + Ok(()) +} diff --git 
a/crates/toolchain/custom_insn/src/lib.rs b/crates/toolchain/custom_insn/src/lib.rs index 57dedc55d0..7e5fe31233 100644 --- a/crates/toolchain/custom_insn/src/lib.rs +++ b/crates/toolchain/custom_insn/src/lib.rs @@ -290,13 +290,15 @@ mod kw { /// rs2 = In rs2 /// ); /// ``` -/// Here, `opcode`, `funct3`, and `funct7` are the opcode, funct3, and funct7 fields of the RISC-V instruction. -/// `rd`, `rs1`, and `rs2` are the destination register, source register 1, and source register 2 respectively. -/// The `In`, `Out`, `InOut`, and `Const` keywords are required to specify the type of the register arguments. -/// They translate to `in(reg)`, `out(reg)`, `inout(reg)`, and `const` respectively, and mean +/// Here, `opcode`, `funct3`, and `funct7` are the opcode, funct3, and funct7 fields of the RISC-V +/// instruction. `rd`, `rs1`, and `rs2` are the destination register, source register 1, and source +/// register 2 respectively. The `In`, `Out`, `InOut`, and `Const` keywords are required to specify +/// the type of the register arguments. They translate to `in(reg)`, `out(reg)`, `inout(reg)`, and +/// `const` respectively, and mean /// - "read the value from this variable" before execution (`In`), /// - "write the value to this variable" after execution (`Out`), -/// - "read the value from this variable, then write it back to the same variable" after execution (`InOut`), and +/// - "read the value from this variable, then write it back to the same variable" after execution +/// (`InOut`), and /// - "use this constant value" (`Const`). #[proc_macro] pub fn custom_insn_r(input: proc_macro::TokenStream) -> proc_macro::TokenStream { @@ -347,12 +349,14 @@ pub fn custom_insn_r(input: proc_macro::TokenStream) -> proc_macro::TokenStream /// ); /// ``` /// Here, `opcode`, `funct3` are the opcode and funct3 fields of the RISC-V instruction. -/// `rd`, `rs1`, and `imm` are the destination register, source register 1, and immediate value respectively. 
-/// The `In`, `Out`, `InOut`, and `Const` keywords are required to specify the type of the register arguments. -/// They translate to `in(reg)`, `out(reg)`, `inout(reg)`, and `const` respectively, and mean +/// `rd`, `rs1`, and `imm` are the destination register, source register 1, and immediate value +/// respectively. The `In`, `Out`, `InOut`, and `Const` keywords are required to specify the type of +/// the register arguments. They translate to `in(reg)`, `out(reg)`, `inout(reg)`, and `const` +/// respectively, and mean /// - "read the value from this variable" before execution (`In`), /// - "write the value to this variable" after execution (`Out`), -/// - "read the value from this variable, then write it back to the same variable" after execution (`InOut`), and +/// - "read the value from this variable, then write it back to the same variable" after execution +/// (`InOut`), and /// - "use this constant value" (`Const`). /// /// The `imm` argument is required to be a constant value. 
diff --git a/crates/toolchain/instructions/src/instruction.rs b/crates/toolchain/instructions/src/instruction.rs index 0650729616..4b0f31e271 100644 --- a/crates/toolchain/instructions/src/instruction.rs +++ b/crates/toolchain/instructions/src/instruction.rs @@ -100,7 +100,8 @@ impl Instruction { impl Default for Instruction { fn default() -> Self { Self { - opcode: VmOpcode::from_usize(0), // there is no real default opcode, this field must always be set + opcode: VmOpcode::from_usize(0), /* there is no real default opcode, this field must + * always be set */ a: T::default(), b: T::default(), c: T::default(), diff --git a/crates/toolchain/instructions/src/lib.rs b/crates/toolchain/instructions/src/lib.rs index 5c5455344d..c251e77d0d 100644 --- a/crates/toolchain/instructions/src/lib.rs +++ b/crates/toolchain/instructions/src/lib.rs @@ -11,7 +11,8 @@ pub mod exe; pub mod instruction; mod phantom; pub mod program; -/// Module with traits and constants for RISC-V instruction definitions for custom OpenVM instructions. +/// Module with traits and constants for RISC-V instruction definitions for custom OpenVM +/// instructions. pub mod riscv; pub mod utils; diff --git a/crates/toolchain/instructions/src/phantom.rs b/crates/toolchain/instructions/src/phantom.rs index 1ae7e2d49a..06139d2507 100644 --- a/crates/toolchain/instructions/src/phantom.rs +++ b/crates/toolchain/instructions/src/phantom.rs @@ -3,11 +3,13 @@ use strum::FromRepr; #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct PhantomDiscriminant(pub u16); -/// Phantom instructions owned by the system. These are handled in the `ExecutionSegment`, as opposed to the `PhantomChip`. +/// Phantom instructions owned by the system. These are handled in the `ExecutionSegment`, as +/// opposed to the `PhantomChip`. 
#[derive(Copy, Clone, Debug, PartialEq, Eq, FromRepr)] #[repr(u16)] pub enum SysPhantom { - /// Does nothing at constraint and runtime level besides advance pc by [DEFAULT_PC_STEP](super::program::DEFAULT_PC_STEP). + /// Does nothing at constraint and runtime level besides advance pc by + /// [DEFAULT_PC_STEP](super::program::DEFAULT_PC_STEP). Nop = 0, /// Causes the runtime to panic, on host machine and prints a backtrace. DebugPanic, diff --git a/crates/toolchain/instructions/src/program.rs b/crates/toolchain/instructions/src/program.rs index 568d794f64..08bbc51806 100644 --- a/crates/toolchain/instructions/src/program.rs +++ b/crates/toolchain/instructions/src/program.rs @@ -17,7 +17,8 @@ pub const MAX_ALLOWED_PC: u32 = (1 << PC_BITS) - 1; pub struct Program { /// A map from program counter to instruction. /// Sometimes the instructions are enumerated as 0, 4, 8, etc. - /// Maybe at some point we will replace this with a struct that would have a `Vec` under the hood and divide the incoming `pc` by whatever given. + /// Maybe at some point we will replace this with a struct that would have a `Vec` under the + /// hood and divide the incoming `pc` by whatever given. 
pub instructions_and_debug_infos: Vec, Option)>>, pub step: u32, pub pc_base: u32, @@ -71,8 +72,8 @@ impl Program { } } - /// We assume that pc_start = pc_base = 0 everywhere except the RISC-V programs, until we need otherwise - /// We use [DEFAULT_PC_STEP] for consistency with RISC-V + /// We assume that pc_start = pc_base = 0 everywhere except the RISC-V programs, until we need + /// otherwise We use [DEFAULT_PC_STEP] for consistency with RISC-V pub fn from_instructions_and_debug_infos( instructions: &[Instruction], debug_infos: &[Option], diff --git a/crates/toolchain/openvm/src/io/mod.rs b/crates/toolchain/openvm/src/io/mod.rs index de22862f45..0c3b80ccca 100644 --- a/crates/toolchain/openvm/src/io/mod.rs +++ b/crates/toolchain/openvm/src/io/mod.rs @@ -66,14 +66,16 @@ pub(crate) fn read_vec_by_len(len: usize) -> Vec { // Allocate a buffer of the required length // We prefer that the allocator should allocate this buffer to a 4-byte boundary, // but we do not specify it here because `Vec` safety requires the alignment to - // exactly equal the alignment of `u8`, which is 1. See `Vec::from_raw_parts` for more details. + // exactly equal the alignment of `u8`, which is 1. See `Vec::from_raw_parts` for more + // details. // // Note: the bump allocator we use by default has minimum alignment of 4 bytes. // The heap-embedded-alloc uses linked list allocator, which has a minimum alignment of // `sizeof(usize) * 2 = 8` on 32-bit architectures: https://github.com/rust-osdev/linked-list-allocator/blob/b5caf3271259ddda60927752fa26527e0ccd2d56/src/hole.rs#L429 let mut bytes = Vec::with_capacity(capacity); hint_buffer_u32!(bytes.as_mut_ptr(), num_words); - // SAFETY: We populate a `Vec` by hintstore-ing `num_words` 4 byte words. We set the length to `len` and don't care about the extra `capacity - len` bytes stored. + // SAFETY: We populate a `Vec` by hintstore-ing `num_words` 4 byte words. 
We set the + // length to `len` and don't care about the extra `capacity - len` bytes stored. unsafe { bytes.set_len(len); } diff --git a/crates/toolchain/openvm/src/lib.rs b/crates/toolchain/openvm/src/lib.rs index 88bffc471d..134d10b559 100644 --- a/crates/toolchain/openvm/src/lib.rs +++ b/crates/toolchain/openvm/src/lib.rs @@ -88,8 +88,8 @@ macro_rules! entry { } }; } -/// This macro does nothing. You should name the function `main` so that the normal rust main function -/// setup is used. +/// This macro does nothing. You should name the function `main` so that the normal rust main +/// function setup is used. #[cfg(any(feature = "std", not(target_os = "zkvm")))] #[macro_export] macro_rules! entry { diff --git a/crates/toolchain/openvm/src/pal_abi.rs b/crates/toolchain/openvm/src/pal_abi.rs index 5e7717df24..0ab3d3f386 100644 --- a/crates/toolchain/openvm/src/pal_abi.rs +++ b/crates/toolchain/openvm/src/pal_abi.rs @@ -1,9 +1,9 @@ /// For rust std library compatibility, we need to define the ABI specified in /// /// while we are using target = "riscv32im-risc0-zkvm-elf". -/// This will be removed once a dedicated rust toolchain is used because OpenVM does not handle system -/// operations in the same way: there is no operating system and even the standard library should be -/// directly handled with intrinsics. +/// This will be removed once a dedicated rust toolchain is used because OpenVM does not handle +/// system operations in the same way: there is no operating system and even the standard +/// library should be directly handled with intrinsics. 
use openvm_platform::{fileno::*, memory::sys_alloc_aligned, rust_rt::terminate, WORD_SIZE}; use openvm_rv32im_guest::{hint_buffer_u32, hint_random, raw_print_str_from_bytes}; diff --git a/crates/toolchain/platform/src/getrandom.rs b/crates/toolchain/platform/src/getrandom.rs index 4b8753e6dd..1edb4578ee 100644 --- a/crates/toolchain/platform/src/getrandom.rs +++ b/crates/toolchain/platform/src/getrandom.rs @@ -1,4 +1,5 @@ -//! We need to export a custom getrandom implementation just to get crates that import getrandom to compile. +//! We need to export a custom getrandom implementation just to get crates that import getrandom to +//! compile. use getrandom::{register_custom_getrandom, Error}; /// This is a getrandom handler for the zkvm. It's intended to hook into a diff --git a/crates/toolchain/platform/src/rust_rt.rs b/crates/toolchain/platform/src/rust_rt.rs index 1cef2cb072..477dd73ce6 100644 --- a/crates/toolchain/platform/src/rust_rt.rs +++ b/crates/toolchain/platform/src/rust_rt.rs @@ -1,13 +1,13 @@ //! This module contains the components required to link a Rust binary. //! //! In particular: -//! * It defines an entrypoint ensuring initialization and finalization are done -//! properly. +//! * It defines an entrypoint ensuring initialization and finalization are done properly. //! * It includes a panic handler. //! * It includes an allocator. 
-/// WARNING: the [SYSTEM_OPCODE] here should be equal to `SYSTEM_OPCODE` in `extensions_rv32im_guest` -/// Can't import `openvm_rv32im_guest` here because would create a circular dependency +/// WARNING: the [SYSTEM_OPCODE] here should be equal to `SYSTEM_OPCODE` in +/// `extensions_rv32im_guest` Can't import `openvm_rv32im_guest` here because would create a +/// circular dependency #[cfg(target_os = "zkvm")] /// This is custom-0 defined in RISC-V spec document const SYSTEM_OPCODE: u8 = 0x0b; diff --git a/crates/toolchain/tests/Cargo.toml b/crates/toolchain/tests/Cargo.toml index 47d697dc03..d31d388c32 100644 --- a/crates/toolchain/tests/Cargo.toml +++ b/crates/toolchain/tests/Cargo.toml @@ -8,7 +8,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -openvm-circuit-primitives-derive.workspace = true openvm-stark-backend.workspace = true openvm-stark-sdk.workspace = true openvm-circuit = { workspace = true, features = ["test-utils"] } @@ -35,3 +34,6 @@ num-bigint.workspace = true [features] default = ["parallel"] parallel = ["openvm-circuit/parallel"] + +[package.metadata.cargo-shear] +ignored = ["derive_more", "openvm-stark-backend"] diff --git a/crates/toolchain/tests/tests/transpiler_tests.rs b/crates/toolchain/tests/tests/transpiler_tests.rs index 350f8bca07..82d74afcca 100644 --- a/crates/toolchain/tests/tests/transpiler_tests.rs +++ b/crates/toolchain/tests/tests/transpiler_tests.rs @@ -3,7 +3,6 @@ use std::{ path::{Path, PathBuf}, }; -use derive_more::derive::From; use eyre::Result; use num_bigint::BigUint; use openvm_algebra_circuit::{ @@ -13,14 +12,10 @@ use openvm_algebra_circuit::{ use openvm_algebra_transpiler::{Fp2TranspilerExtension, ModularTranspilerExtension}; use openvm_bigint_circuit::{Int256, Int256Executor, Int256Periphery}; use openvm_circuit::{ - arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, VmConfig, VmExecutor, - VmInventoryError, - }, - derive::{AnyEnum, InstructionExecutor, VmConfig}, + 
arch::{SystemConfig, VmExecutor}, + derive::VmConfig, utils::air_test, }; -use openvm_circuit_primitives_derive::{Chip, ChipUsageGetter}; use openvm_ecc_guest::k256::{SECP256K1_MODULUS, SECP256K1_ORDER}; use openvm_instructions::exe::VmExe; use openvm_platform::memory::MEM_SIZE; @@ -55,8 +50,8 @@ fn test_decode_elf() -> Result<()> { } // To create ELF directly from .S file, `brew install riscv-gnu-toolchain` and run -// `riscv64-unknown-elf-gcc -march=rv32im -mabi=ilp32 -nostartfiles -e _start -Ttext 0 fib.S -o rv32im-fib-from-as` -// riscv64-unknown-elf-gcc supports rv32im if you set -march target +// `riscv64-unknown-elf-gcc -march=rv32im -mabi=ilp32 -nostartfiles -e _start -Ttext 0 fib.S -o +// rv32im-fib-from-as` riscv64-unknown-elf-gcc supports rv32im if you set -march target #[test_case("tests/data/rv32im-fib-from-as")] #[test_case("tests/data/rv32im-intrin-from-as")] fn test_generate_program(elf_path: &str) -> Result<()> { diff --git a/crates/toolchain/transpiler/src/extension.rs b/crates/toolchain/transpiler/src/extension.rs index 3ed4d054fd..a8061642a3 100644 --- a/crates/toolchain/transpiler/src/extension.rs +++ b/crates/toolchain/transpiler/src/extension.rs @@ -2,13 +2,14 @@ use openvm_instructions::instruction::Instruction; /// Trait to add custom RISC-V instruction transpilation to OpenVM instruction format. /// RISC-V instructions always come in 32-bit chunks. -/// An important feature is that multiple 32-bit RISC-V instructions can be transpiled into a single OpenVM instruction. -/// See [process_custom](Self::process_custom) for details. +/// An important feature is that multiple 32-bit RISC-V instructions can be transpiled into a single +/// OpenVM instruction. See [process_custom](Self::process_custom) for details. pub trait TranspilerExtension { - /// The `instruction_stream` provides a view of the remaining RISC-V instructions to be processed, - /// presented as 32-bit chunks. 
The [process_custom](Self::process_custom) should determine if it knows how to transpile - /// the next contiguous section of RISC-V instructions into an [`Instruction`]. - /// It returns `None` if it cannot transpile. Otherwise it returns `TranspilerOutput { instructions, used_u32s }` to indicate that + /// The `instruction_stream` provides a view of the remaining RISC-V instructions to be + /// processed, presented as 32-bit chunks. The [process_custom](Self::process_custom) should + /// determine if it knows how to transpile the next contiguous section of RISC-V + /// instructions into an [`Instruction`]. It returns `None` if it cannot transpile. + /// Otherwise it returns `TranspilerOutput { instructions, used_u32s }` to indicate that /// `instruction_stream[..used_u32s]` should be transpiled into `instructions`. fn process_custom(&self, instruction_stream: &[u32]) -> Option>; } diff --git a/crates/toolchain/transpiler/src/transpiler.rs b/crates/toolchain/transpiler/src/transpiler.rs index 7a15247c55..54e2c1e91d 100644 --- a/crates/toolchain/transpiler/src/transpiler.rs +++ b/crates/toolchain/transpiler/src/transpiler.rs @@ -42,11 +42,11 @@ impl Transpiler { } /// Iterates over a sequence of 32-bit RISC-V instructions `instructions_u32`. The iterator - /// applies every processor in the [`Transpiler`] to determine if one of them knows how to transpile - /// the current instruction (and possibly a contiguous section of following instructions). - /// If so, it advances the iterator by the amount specified by the processor. - /// The transpiler will panic if two different processors claim to know how to transpile the same instruction - /// to avoid ambiguity. + /// applies every processor in the [`Transpiler`] to determine if one of them knows how to + /// transpile the current instruction (and possibly a contiguous section of following + /// instructions). If so, it advances the iterator by the amount specified by the processor. 
+ /// The transpiler will panic if two different processors claim to know how to transpile the + /// same instruction to avoid ambiguity. pub fn transpile( &self, instructions_u32: &[u32], diff --git a/crates/toolchain/transpiler/src/util.rs b/crates/toolchain/transpiler/src/util.rs index 4d6d858121..d9135de153 100644 --- a/crates/toolchain/transpiler/src/util.rs +++ b/crates/toolchain/transpiler/src/util.rs @@ -39,7 +39,8 @@ pub fn from_r_type( ) } -/// Create a new [`Instruction`] from an I-type instruction. Should only be used for ALU instructions because `imm` is transpiled in a special way. +/// Create a new [`Instruction`] from an I-type instruction. Should only be used for ALU +/// instructions because `imm` is transpiled in a special way. pub fn from_i_type(opcode: usize, dec_insn: &IType) -> Instruction { if dec_insn.rd == 0 { return nop(); @@ -147,7 +148,8 @@ pub fn from_u_type(opcode: usize, dec_insn: &UType) -> Instruct ) } -/// Create a new [`Instruction`] that exits with code 2. This is equivalent to program panic but with a special exit code for debugging. +/// Create a new [`Instruction`] that exits with code 2. This is equivalent to program panic but +/// with a special exit code for debugging. 
pub fn unimp() -> Instruction { Instruction { opcode: SystemOpcode::TERMINATE.global_opcode(), diff --git a/crates/vm/Cargo.toml b/crates/vm/Cargo.toml index 7976dc5a62..80e6794b48 100644 --- a/crates/vm/Cargo.toml +++ b/crates/vm/Cargo.toml @@ -46,7 +46,7 @@ openvm-native-compiler.workspace = true openvm-rv32im-transpiler.workspace = true [features] -default = ["parallel", "mimalloc"] +default = ["parallel", "jemalloc"] parallel = ["openvm-stark-backend/parallel"] test-utils = ["dep:openvm-stark-sdk"] bench-metrics = ["dep:metrics", "openvm-stark-backend/bench-metrics"] diff --git a/crates/vm/derive/src/lib.rs b/crates/vm/derive/src/lib.rs index 472a6aeb59..37dca6e4ed 100644 --- a/crates/vm/derive/src/lib.rs +++ b/crates/vm/derive/src/lib.rs @@ -26,8 +26,8 @@ pub fn instruction_executor_derive(input: TokenStream) -> TokenStream { } _ => panic!("Only unnamed fields are supported"), }; - // Use full path ::openvm_circuit... so it can be used either within or outside the vm crate. - // Assume F is already generic of the field. + // Use full path ::openvm_circuit... so it can be used either within or outside the vm + // crate. Assume F is already generic of the field. let mut new_generics = generics.clone(); let where_clause = new_generics.make_where_clause(); where_clause.predicates.push( @@ -73,8 +73,8 @@ pub fn instruction_executor_derive(input: TokenStream) -> TokenStream { _ => None, }) .expect("First generic must be type for Field"); - // Use full path ::openvm_circuit... so it can be used either within or outside the vm crate. - // Assume F is already generic of the field. + // Use full path ::openvm_circuit... so it can be used either within or outside the vm + // crate. Assume F is already generic of the field. 
let (execute_arms, get_opcode_name_arms): (Vec<_>, Vec<_>) = multiunzip(variants.iter().map(|(variant_name, field)| { let field_ty = &field.ty; @@ -317,12 +317,15 @@ pub fn vm_generic_config_derive(input: proc_macro::TokenStream) -> proc_macro::T #field_name_upper(#periphery_name), }); create_chip_complex.push(quote! { - let complex: VmChipComplex = complex.extend(&self.#field_name)?; + let complex: ::openvm_circuit::arch::VmChipComplex = complex.extend(&self.#field_name)?; }); } let (source_executor_type, source_periphery_type) = match &source { - Source::System(_) => (quote! { SystemExecutor }, quote! { SystemPeriphery }), + Source::System(_) => ( + quote! { ::openvm_circuit::arch::SystemExecutor }, + quote! { ::openvm_circuit::arch::SystemPeriphery }, + ), Source::Config(field_ident) => { let field_type = fields .iter() @@ -344,34 +347,34 @@ pub fn vm_generic_config_derive(input: proc_macro::TokenStream) -> proc_macro::T let periphery_type = Ident::new(&format!("{}Periphery", name), name.span()); TokenStream::from(quote! 
{ - #[derive(ChipUsageGetter, Chip, InstructionExecutor, From, AnyEnum)] + #[derive(::openvm_circuit::circuit_derive::ChipUsageGetter, ::openvm_circuit::circuit_derive::Chip, ::openvm_circuit::derive::InstructionExecutor, ::derive_more::derive::From, ::openvm_circuit::derive::AnyEnum)] pub enum #executor_type { #[any_enum] #source_name_upper(#source_executor_type), #(#executor_enum_fields)* } - #[derive(ChipUsageGetter, Chip, From, AnyEnum)] + #[derive(::openvm_circuit::circuit_derive::ChipUsageGetter, ::openvm_circuit::circuit_derive::Chip, ::derive_more::derive::From, ::openvm_circuit::derive::AnyEnum)] pub enum #periphery_type { #[any_enum] #source_name_upper(#source_periphery_type), #(#periphery_enum_fields)* } - impl VmConfig for #name { + impl ::openvm_circuit::arch::VmConfig for #name { type Executor = #executor_type; type Periphery = #periphery_type; - fn system(&self) -> &SystemConfig { - VmConfig::::system(&self.#source_name) + fn system(&self) -> &::openvm_circuit::arch::SystemConfig { + ::openvm_circuit::arch::VmConfig::::system(&self.#source_name) } - fn system_mut(&mut self) -> &mut SystemConfig { - VmConfig::::system_mut(&mut self.#source_name) + fn system_mut(&mut self) -> &mut ::openvm_circuit::arch::SystemConfig { + ::openvm_circuit::arch::VmConfig::::system_mut(&mut self.#source_name) } fn create_chip_complex( &self, - ) -> Result, VmInventoryError> { + ) -> Result<::openvm_circuit::arch::VmChipComplex, ::openvm_circuit::arch::VmInventoryError> { let complex = self.#source_name.create_chip_complex()?; #(#create_chip_complex)* Ok(complex) diff --git a/crates/vm/src/arch/config.rs b/crates/vm/src/arch/config.rs index f804ecc2b0..30d92130a1 100644 --- a/crates/vm/src/arch/config.rs +++ b/crates/vm/src/arch/config.rs @@ -39,7 +39,10 @@ pub trait VmConfig: Clone + Serialize + DeserializeOwned { #[derive(Debug, Serialize, Deserialize, Clone, new, Copy)] pub struct MemoryConfig { - /// The maximum height of the address space. 
This means the trie has `as_height` layers for searching the address space. The allowed address spaces are those in the range `[as_offset, as_offset + 2^as_height)` where `as_offset` is currently fixed to `1` to not allow address space `0` in memory. + /// The maximum height of the address space. This means the trie has `as_height` layers for + /// searching the address space. The allowed address spaces are those in the range `[as_offset, + /// as_offset + 2^as_height)` where `as_offset` is currently fixed to `1` to not allow address + /// space `0` in memory. pub as_height: usize, /// The offset of the address space. Should be fixed to equal `1`. pub as_offset: u32, @@ -78,9 +81,9 @@ pub struct SystemConfig { /// In single segment mode, `num_public_values` is the number of public values of /// `PublicValuesChip`. In this case, verifier can read public values directly. /// In continuation mode, public values are stored in a special address space. - /// `num_public_values` indicates the number of allowed addresses in that address space. The verifier - /// cannot read public values directly, but they can decommit the public values from the memory - /// merkle root. + /// `num_public_values` indicates the number of allowed addresses in that address space. The + /// verifier cannot read public values directly, but they can decommit the public values + /// from the memory merkle root. pub num_public_values: usize, /// Whether to collect detailed profiling metrics. /// **Warning**: this slows down the runtime. diff --git a/crates/vm/src/arch/execution.rs b/crates/vm/src/arch/execution.rs index 080abd73ce..4edc88d355 100644 --- a/crates/vm/src/arch/execution.rs +++ b/crates/vm/src/arch/execution.rs @@ -234,7 +234,8 @@ impl ExecutionBridge { } } - /// If `to_pc` is `Some`, then `pc_inc` is ignored and the `to_state` uses `to_pc`. Otherwise `to_pc = from_pc + pc_inc`. + /// If `to_pc` is `Some`, then `pc_inc` is ignored and the `to_state` uses `to_pc`. 
Otherwise + /// `to_pc = from_pc + pc_inc`. pub fn execute_and_increment_or_set_pc( &self, opcode: impl Into, @@ -314,7 +315,8 @@ impl From<(u32, Option)> for PcIncOrSet { } /// Phantom sub-instructions affect the runtime of the VM and the trace matrix values. -/// However they all have no AIR constraints besides advancing the pc by [DEFAULT_PC_STEP](openvm_instructions::program::DEFAULT_PC_STEP). +/// However they all have no AIR constraints besides advancing the pc by +/// [DEFAULT_PC_STEP](openvm_instructions::program::DEFAULT_PC_STEP). /// /// They should not mutate memory, but they can mutate the input & hint streams. /// diff --git a/crates/vm/src/arch/extensions.rs b/crates/vm/src/arch/extensions.rs index f92ba3dcae..adda318f6a 100644 --- a/crates/vm/src/arch/extensions.rs +++ b/crates/vm/src/arch/extensions.rs @@ -266,7 +266,8 @@ impl VmInventory { } } - /// Append `other` to current inventory. This means `self` comes earlier in the dependency chain. + /// Append `other` to current inventory. This means `self` comes earlier in the dependency + /// chain. pub fn append(&mut self, mut other: VmInventory) -> Result<(), VmInventoryError> { let num_executors = self.executors.len(); let num_periphery = self.periphery.len(); @@ -651,7 +652,8 @@ impl VmChipComplex { { let mut builder = VmInventoryBuilder::new(&self.config, &self.base, &self.streams, self.bus_idx_mgr); - // Add range checker for convenience, the other system base chips aren't included - they can be accessed directly from builder + // Add range checker for convenience, the other system base chips aren't included - they can + // be accessed directly from builder builder.add_chip(&self.base.range_checker_chip); for chip in self.inventory.executors() { builder.add_chip(chip); @@ -790,7 +792,8 @@ impl VmChipComplex { self.base.memory_controller.set_initial_memory(memory); } - /// Warning: this sets the stream in all chips which have a shared mutable reference to the streams. 
+ /// Warning: this sets the stream in all chips which have a shared mutable reference to the + /// streams. pub(crate) fn set_streams(&mut self, streams: Streams) { *self.streams.lock().unwrap() = streams; } @@ -870,7 +873,8 @@ impl VmChipComplex { /// useful for regular users. /// /// **Warning**: the order of `get_trace_heights` is deterministic, but it is not the same as - /// the order of `air_names`. In other words, the order here does not match the order of AIR IDs. + /// the order of `air_names`. In other words, the order here does not match the order of AIR + /// IDs. pub fn get_internal_trace_heights(&self) -> VmComplexTraceHeights where E: ChipUsageGetter, @@ -885,8 +889,9 @@ impl VmChipComplex { /// Return dummy trace heights of (SystemBase, Inventory). Usually this is for aggregation to /// generate a dummy proof and not useful for regular users. /// - /// **Warning**: the order of `get_dummy_trace_heights` is deterministic, but it is not the same as - /// the order of `air_names`. In other words, the order here does not match the order of AIR IDs. + /// **Warning**: the order of `get_dummy_trace_heights` is deterministic, but it is not the same + /// as the order of `air_names`. In other words, the order here does not match the order of + /// AIR IDs. pub fn get_dummy_internal_trace_heights(&self) -> VmComplexTraceHeights where E: ChipUsageGetter, @@ -944,8 +949,9 @@ impl VmChipComplex { } /// Return trace cells of all chips in order. - /// This returns 0 cells for chips with preprocessed trace because the number of trace cells is constant in those cases. - /// This function is used to sample periodically and provided to the segmentation strategy to decide whether to segment during execution. + /// This returns 0 cells for chips with preprocessed trace because the number of trace cells is + /// constant in those cases. 
This function is used to sample periodically and provided to + /// the segmentation strategy to decide whether to segment during execution. pub(crate) fn current_trace_cells(&self) -> Vec where E: ChipUsageGetter, @@ -956,10 +962,16 @@ impl VmChipComplex { .into_iter() .chain(self._public_values_chip().map(|c| c.current_trace_cells())) .chain(self.memory_controller().current_trace_cells()) - .chain( - self.chips_excluding_pv_chip() - .map(|c| c.current_trace_cells()), - ) + .chain(self.chips_excluding_pv_chip().map(|c| match c { + Either::Executor(c) => c.current_trace_cells(), + Either::Periphery(c) => { + if c.constant_trace_height().is_some() { + 0 + } else { + c.current_trace_cells() + } + } + })) .chain([0]) // range_checker_chip .collect() } @@ -1056,9 +1068,10 @@ impl VmChipComplex { debug_assert_eq!(builder.curr_air_id, CONNECTOR_AIR_ID); builder.add_air_proof_input(connector_chip.generate_air_proof_input()); - // Go through all chips in inventory in reverse order they were added (to resolve dependencies) - // Important Note: for air_id ordering reasons, we want to generate_air_proof_input for - // public values and memory chips **last** but include them into the `builder` **first**. + // Go through all chips in inventory in reverse order they were added (to resolve + // dependencies) Important Note: for air_id ordering reasons, we want to + // generate_air_proof_input for public values and memory chips **last** but include + // them into the `builder` **first**. let mut public_values_input = None; let mut insertion_order = self.inventory.insertion_order; insertion_order.reverse(); @@ -1141,7 +1154,8 @@ impl VmProofInputBuilder { } } /// Adds air proof input if one of the main trace matrices is non-empty. - /// Always increments the internal `curr_air_id` regardless of whether a new air proof input was added or not. + /// Always increments the internal `curr_air_id` regardless of whether a new air proof input was + /// added or not. 
fn add_air_proof_input(&mut self, air_proof_input: AirProofInput) { let h = if !air_proof_input.raw.cached_mains.is_empty() { air_proof_input.raw.cached_mains[0].height() diff --git a/crates/vm/src/arch/integration_api.rs b/crates/vm/src/arch/integration_api.rs index 745f8b58cc..b1116d8c48 100644 --- a/crates/vm/src/arch/integration_api.rs +++ b/crates/vm/src/arch/integration_api.rs @@ -49,8 +49,8 @@ pub trait VmAdapterChip { type Interface: VmAdapterInterface; - /// Given instruction, perform memory reads and return only the read data that the integrator needs to use. - /// This is called at the start of instruction execution. + /// Given instruction, perform memory reads and return only the read data that the integrator + /// needs to use. This is called at the start of instruction execution. /// /// The implementer may choose to store data in the `Self::ReadRecord` struct, for example in /// an [Option], which will later be sent to the `postprocess` method. @@ -64,8 +64,9 @@ pub trait VmAdapterChip { Self::ReadRecord, )>; - /// Given instruction and the data to write, perform memory writes and return the `(record, next_timestamp)` - /// of the full adapter record for this instruction. This is guaranteed to be called after `preprocess`. + /// Given instruction and the data to write, perform memory writes and return the `(record, + /// next_timestamp)` of the full adapter record for this instruction. This is guaranteed to + /// be called after `preprocess`. fn postprocess( &mut self, memory: &mut MemoryController, @@ -94,9 +95,11 @@ pub trait VmAdapterAir: BaseAir { type Interface: VmAdapterInterface; /// [Air](openvm_stark_backend::p3_air::Air) constraints owned by the adapter. - /// The `interface` is given as abstract expressions so it can be directly used in other AIR constraints. + /// The `interface` is given as abstract expressions so it can be directly used in other AIR + /// constraints. 
/// - /// Adapters should document the max constraint degree as a function of the constraint degrees of `reads, writes, instruction`. + /// Adapters should document the max constraint degree as a function of the constraint degrees + /// of `reads, writes, instruction`. fn eval( &self, builder: &mut AB, @@ -110,9 +113,11 @@ pub trait VmAdapterAir: BaseAir { /// Trait to be implemented on primitive chip to integrate with the machine. pub trait VmCoreChip> { - /// Minimum data that must be recorded to be able to generate trace for one row of `PrimitiveAir`. + /// Minimum data that must be recorded to be able to generate trace for one row of + /// `PrimitiveAir`. type Record: Send + Serialize + DeserializeOwned; - /// The primitive AIR with main constraints that do not depend on memory and other architecture-specifics. + /// The primitive AIR with main constraints that do not depend on memory and other + /// architecture-specifics. type Air: BaseAirWithPublicValues + Clone; #[allow(clippy::type_complexity)] @@ -138,9 +143,10 @@ pub trait VmCoreChip> { fn air(&self) -> &Self::Air; - /// Finalize the trace, especially the padded rows if the all-zero rows don't satisfy the constraints. - /// This is done **after** records are consumed and the trace matrix is generated. - /// Most implementations should just leave the default implementation if padding with rows of all 0s satisfies the constraints. + /// Finalize the trace, especially the padded rows if the all-zero rows don't satisfy the + /// constraints. This is done **after** records are consumed and the trace matrix is + /// generated. Most implementations should just leave the default implementation if padding + /// with rows of all 0s satisfies the constraints. fn finalize(&self, _trace: &mut RowMajorMatrix, _num_records: usize) { // do nothing by default } @@ -394,7 +400,6 @@ where /// The most common adapter interface. 
/// Performs `NUM_READS` batch reads of size `READ_SIZE` and /// `NUM_WRITES` batch writes of size `WRITE_SIZE`. -/// pub struct BasicAdapterInterface< T, PI, @@ -484,7 +489,8 @@ impl< type ProcessedInstruction = MinimalInstruction; } -/// Similar to `BasicAdapterInterface`, but it flattens the reads and writes into a single flat array for each +/// Similar to `BasicAdapterInterface`, but it flattens the reads and writes into a single flat +/// array for each pub struct FlatInterface( PhantomData, PhantomData, @@ -498,8 +504,8 @@ impl VmAdapterInterfac type ProcessedInstruction = PI; } -/// An interface that is fully determined during runtime. This should **only** be used as a last resort when static -/// compile-time guarantees cannot be made. +/// An interface that is fully determined during runtime. This should **only** be used as a last +/// resort when static compile-time guarantees cannot be made. #[derive(Serialize, Deserialize)] pub struct DynAdapterInterface(PhantomData); diff --git a/crates/vm/src/arch/segment.rs b/crates/vm/src/arch/segment.rs index 56833b7e26..634632ce2b 100644 --- a/crates/vm/src/arch/segment.rs +++ b/crates/vm/src/arch/segment.rs @@ -262,7 +262,8 @@ impl> ExecutionSegment { break; } - // Some phantom instruction handling is more convenient to do here than in PhantomChip. + // Some phantom instruction handling is more convenient to do here than in + // PhantomChip. if opcode == SystemOpcode::PHANTOM.global_opcode() { // Note: the discriminant is the lower 16 bits of the c operand. let discriminant = c.as_canonical_u32() as u16; @@ -357,7 +358,8 @@ impl> ExecutionSegment { }) } - /// Returns bool of whether to switch to next segment or not. This is called every clock cycle inside of Core trace generation. + /// Returns bool of whether to switch to next segment or not. This is called every clock cycle + /// inside of Core trace generation. 
fn should_segment(&mut self) -> bool { if !self.system_config().continuation_enabled { return false; diff --git a/crates/vm/src/arch/testing/mod.rs b/crates/vm/src/arch/testing/mod.rs index c3aced7c33..44b19177be 100644 --- a/crates/vm/src/arch/testing/mod.rs +++ b/crates/vm/src/arch/testing/mod.rs @@ -339,7 +339,8 @@ where zip(airs, air_proof_inputs).filter(|(_, input)| input.main_trace_height() > 0), ); } - self = self.load(range_checker); // this must be last because other trace generation mutates its state + self = self.load(range_checker); // this must be last because other trace generation + // mutates its state } self } diff --git a/crates/vm/src/arch/vm.rs b/crates/vm/src/arch/vm.rs index 7c45b22dad..a826fb4137 100644 --- a/crates/vm/src/arch/vm.rs +++ b/crates/vm/src/arch/vm.rs @@ -178,6 +178,12 @@ where ); let pc = exe.pc_start; let mut state = VmExecutorNextSegmentState::new(memory, input, pc); + + #[cfg(feature = "bench-metrics")] + { + state.metrics.fn_bounds = exe.fn_bounds.clone(); + } + let mut segment_idx = 0; loop { @@ -209,7 +215,8 @@ where /// Executes a program until a segmentation happens. /// Returns the last segment and the vm state for next segment. - /// This is so that the tracegen and proving of this segment can be immediately started (on a separate machine). + /// This is so that the tracegen and proving of this segment can be immediately started (on a + /// separate machine). pub fn execute_until_segment( &self, exe: impl Into>, @@ -376,7 +383,7 @@ pub struct SingleSegmentVmExecutionResult { /// Heights of each AIR, ordered by AIR ID. pub air_heights: Vec, /// Heights of (SystemBase, Inventory), in an internal ordering. 
- pub internal_heights: VmComplexTraceHeights, + pub vm_heights: VmComplexTraceHeights, } impl SingleSegmentVmExecutor @@ -424,7 +431,7 @@ where segment }; let air_heights = segment.chip_complex.current_trace_heights(); - let internal_heights = segment.chip_complex.get_internal_trace_heights(); + let vm_heights = segment.chip_complex.get_internal_trace_heights(); let public_values = if let Some(pv_chip) = segment.chip_complex.public_values_chip() { pv_chip.core.get_custom_public_values() } else { @@ -433,7 +440,7 @@ where Ok(SingleSegmentVmExecutionResult { public_values, air_heights, - internal_heights, + vm_heights, }) } @@ -628,9 +635,10 @@ where .collect() } - /// Verify segment proofs, checking continuation boundary conditions between segments if VM memory is persistent - /// The behavior of this function differs depending on whether continuations is enabled or not. - /// We recommend to call the functions [`verify_segments`] or [`verify_single`] directly instead. + /// Verify segment proofs, checking continuation boundary conditions between segments if VM + /// memory is persistent The behavior of this function differs depending on whether + /// continuations is enabled or not. We recommend to call the functions [`verify_segments`] + /// or [`verify_single`] directly instead. pub fn verify( &self, vk: &MultiStarkVerifyingKey, @@ -687,9 +695,9 @@ pub struct VerifiedExecutionPayload { /// - `vk` is a valid verifying key of a VM circuit. /// /// Returns: -/// - The commitment to the [VmCommittedExe] extracted from `proofs`. -/// It is the responsibility of the caller to check that the returned commitment matches -/// the VM executable that the VM was supposed to execute. +/// - The commitment to the [VmCommittedExe] extracted from `proofs`. It is the responsibility of +/// the caller to check that the returned commitment matches the VM executable that the VM was +/// supposed to execute. /// - The Merkle root of the final memory state. 
/// /// ## Note diff --git a/crates/vm/src/lib.rs b/crates/vm/src/lib.rs index 685738e476..2e3ba461c5 100644 --- a/crates/vm/src/lib.rs +++ b/crates/vm/src/lib.rs @@ -11,8 +11,8 @@ pub mod arch; #[cfg(feature = "bench-metrics")] pub mod metrics; /// System chips that are always required by the architecture. -/// (The [PhantomChip](system::phantom::PhantomChip) is not technically required for a functioning VM, -/// but there is almost always a need for it.) +/// (The [PhantomChip](system::phantom::PhantomChip) is not technically required for a functioning +/// VM, but there is almost always a need for it.) pub mod system; /// Utility functions and test utils pub mod utils; diff --git a/crates/vm/src/metrics/cycle_tracker/mod.rs b/crates/vm/src/metrics/cycle_tracker/mod.rs index 06cbe09193..9b435d93e4 100644 --- a/crates/vm/src/metrics/cycle_tracker/mod.rs +++ b/crates/vm/src/metrics/cycle_tracker/mod.rs @@ -10,7 +10,8 @@ impl CycleTracker { } /// Starts a new cycle tracker span for the given name. - /// If a span already exists for the given name, it ends the existing span and pushes a new one to the vec. + /// If a span already exists for the given name, it ends the existing span and pushes a new one + /// to the vec. pub fn start(&mut self, mut name: String) { // hack to remove "CT-" prefix if name.starts_with("CT-") { diff --git a/crates/vm/src/system/connector/mod.rs b/crates/vm/src/system/connector/mod.rs index 8044a48451..dc9ff88ea2 100644 --- a/crates/vm/src/system/connector/mod.rs +++ b/crates/vm/src/system/connector/mod.rs @@ -161,10 +161,12 @@ impl Air (AB::Expr::ONE - prep_local[0]) * end.is_terminate, ); - // The following constraints hold on every row, so we rename `begin` to `local` to avoid confusion. + // The following constraints hold on every row, so we rename `begin` to `local` to avoid + // confusion. 
let local = begin; - // We decompose and range check `local.timestamp` as `timestamp_low_limb, timestamp_high_limb` where - // `timestamp = timestamp_low_limb + timestamp_high_limb * 2^range_max_bits`. + // We decompose and range check `local.timestamp` as `timestamp_low_limb, + // timestamp_high_limb` where `timestamp = timestamp_low_limb + timestamp_high_limb + // * 2^range_max_bits`. let (low_bits, high_bits) = self.timestamp_limb_bits(); let high_limb = (local.timestamp - local.timestamp_low_limb) * AB::F::ONE.div_2exp_u64(self.range_bus.range_max_bits as u64); diff --git a/crates/vm/src/system/memory/adapter/air.rs b/crates/vm/src/system/memory/adapter/air.rs index 8bb9fdb699..bcc367b0ec 100644 --- a/crates/vm/src/system/memory/adapter/air.rs +++ b/crates/vm/src/system/memory/adapter/air.rs @@ -67,7 +67,8 @@ impl Air for AccessAdapterAir { // assuming valid: // Split = 1 => direction = 1 => receive parent with count 1, send left/right with count 1 - // Split = 0 => direction = -1 => receive parent with count -1, send left/right with count -1 + // Split = 0 => direction = -1 => receive parent with count -1, send left/right with count + // -1 let direction = local.is_valid * (AB::Expr::TWO * local.is_split - AB::Expr::ONE); self.memory_bus diff --git a/crates/vm/src/system/memory/controller/mod.rs b/crates/vm/src/system/memory/controller/mod.rs index 9c2a3497b3..680a03ab8e 100644 --- a/crates/vm/src/system/memory/controller/mod.rs +++ b/crates/vm/src/system/memory/controller/mod.rs @@ -457,9 +457,9 @@ impl MemoryController { fn replay_access_log(&mut self) { let log = mem::take(&mut self.memory.log); if log.is_empty() { - // Online memory logs may be empty, but offline memory may be replayed from external sources. - // In these cases, we skip the calls to replay access logs because `set_log_capacity` would - // panic. + // Online memory logs may be empty, but offline memory may be replayed from external + // sources. 
In these cases, we skip the calls to replay access logs because + // `set_log_capacity` would panic. tracing::debug!("skipping replay_access_log"); return; } @@ -477,7 +477,8 @@ impl MemoryController { } } - /// Low-level API to replay a single memory access log entry and populate the [OfflineMemory], [MemoryInterface], and `AccessAdapterInventory`. + /// Low-level API to replay a single memory access log entry and populate the [OfflineMemory], + /// [MemoryInterface], and `AccessAdapterInventory`. pub fn replay_access( entry: MemoryLogEntry, offline_memory: &mut OfflineMemory, @@ -720,7 +721,8 @@ pub struct MemoryAuxColsFactory { pub(crate) _marker: PhantomData, } -// NOTE[jpw]: The `make_*_aux_cols` functions should be thread-safe so they can be used in parallelized trace generation. +// NOTE[jpw]: The `make_*_aux_cols` functions should be thread-safe so they can be used in +// parallelized trace generation. impl MemoryAuxColsFactory { pub fn generate_read_aux(&self, read: &MemoryRecord, buffer: &mut MemoryReadAuxCols) { assert!( diff --git a/crates/vm/src/system/memory/merkle/air.rs b/crates/vm/src/system/memory/merkle/air.rs index 77877ffe87..003aa31f78 100644 --- a/crates/vm/src/system/memory/merkle/air.rs +++ b/crates/vm/src/system/memory/merkle/air.rs @@ -75,7 +75,8 @@ impl Ai // row with least height should have `height_section` = 0, `is_root` = 0 builder.when_last_row().assert_zero(local.height_section); builder.when_last_row().assert_zero(local.is_root); - // `height_section` changes from 0 to 1 only when `parent_height` changes from `address_height` to `address_height` + 1 + // `height_section` changes from 0 to 1 only when `parent_height` changes from + // `address_height` to `address_height` + 1 builder .when_transition() .when_ne( diff --git a/crates/vm/src/system/memory/merkle/mod.rs b/crates/vm/src/system/memory/merkle/mod.rs index 30728a32d7..74f8951bc4 100644 --- a/crates/vm/src/system/memory/merkle/mod.rs +++ 
b/crates/vm/src/system/memory/merkle/mod.rs @@ -28,7 +28,8 @@ struct FinalState { } impl MemoryMerkleChip { - /// `compression_bus` is the bus for direct (no-memory involved) interactions to call the cryptographic compression function. + /// `compression_bus` is the bus for direct (no-memory involved) interactions to call the + /// cryptographic compression function. pub fn new( memory_dimensions: MemoryDimensions, merkle_bus: PermutationCheckBus, diff --git a/crates/vm/src/system/memory/offline_checker/bridge.rs b/crates/vm/src/system/memory/offline_checker/bridge.rs index 44705d15b6..2c7e180cfb 100644 --- a/crates/vm/src/system/memory/offline_checker/bridge.rs +++ b/crates/vm/src/system/memory/offline_checker/bridge.rs @@ -17,14 +17,14 @@ use crate::system::memory::{ MemoryAddress, }; -/// AUX_LEN is the number of auxiliary columns (aka the number of limbs that the input numbers will be decomposed into) -/// for the `AssertLtSubAir` in the `MemoryOfflineChecker`. +/// AUX_LEN is the number of auxiliary columns (aka the number of limbs that the input numbers will +/// be decomposed into) for the `AssertLtSubAir` in the `MemoryOfflineChecker`. /// Warning: This requires that (clk_max_bits + decomp - 1) / decomp = AUX_LEN /// in MemoryOfflineChecker (or whenever AssertLtSubAir is used) pub(crate) const AUX_LEN: usize = 2; -/// The [MemoryBridge] is used within AIR evaluation functions to constrain logical memory operations (read/write). -/// It adds all necessary constraints and interactions. +/// The [MemoryBridge] is used within AIR evaluation functions to constrain logical memory +/// operations (read/write). It adds all necessary constraints and interactions. #[derive(Clone, Copy, Debug)] pub struct MemoryBridge { offline_checker: MemoryOfflineChecker, @@ -212,8 +212,8 @@ impl> MemoryReadOrImmediateOperation<'_, F, V } } -/// Constraints and interactions for a logical memory write of `(address, data)` at time `timestamp`. 
-/// This reads `(address, data_prev, timestamp_prev)` from the memory bus and writes +/// Constraints and interactions for a logical memory write of `(address, data)` at time +/// `timestamp`. This reads `(address, data_prev, timestamp_prev)` from the memory bus and writes /// `(address, data, timestamp)` to the memory bus. /// Includes constraints for `timestamp_prev < timestamp`. /// diff --git a/crates/vm/src/system/memory/offline_checker/columns.rs b/crates/vm/src/system/memory/offline_checker/columns.rs index 5ba1b8da6c..5a27b3e433 100644 --- a/crates/vm/src/system/memory/offline_checker/columns.rs +++ b/crates/vm/src/system/memory/offline_checker/columns.rs @@ -95,8 +95,10 @@ pub struct MemoryReadOrImmediateAuxCols { impl AsRef> for MemoryWriteAuxCols { fn as_ref(&self) -> &MemoryReadAuxCols { // Safety: - // - `MemoryReadAuxCols` is repr(C) and its only field is the first field of `MemoryWriteAuxCols`. - // - Thus, the memory layout of `MemoryWriteAuxCols` begins with a valid `MemoryReadAuxCols`. + // - `MemoryReadAuxCols` is repr(C) and its only field is the first field of + // `MemoryWriteAuxCols`. + // - Thus, the memory layout of `MemoryWriteAuxCols` begins with a valid + // `MemoryReadAuxCols`. unsafe { &*(self as *const MemoryWriteAuxCols as *const MemoryReadAuxCols) } } } diff --git a/crates/vm/src/system/memory/persistent.rs b/crates/vm/src/system/memory/persistent.rs index 8936685080..45f9fe9dbd 100644 --- a/crates/vm/src/system/memory/persistent.rs +++ b/crates/vm/src/system/memory/persistent.rs @@ -49,7 +49,8 @@ pub struct PersistentBoundaryCols { /// /// Sends the following interactions: /// - if `expand_direction` is 1, sends `[0, 0, address_space_label, leaf_label]` to `merkle_bus`. -/// - if `expand_direction` is -1, receives `[1, 0, address_space_label, leaf_label]` from `merkle_bus`. +/// - if `expand_direction` is -1, receives `[1, 0, address_space_label, leaf_label]` from +/// `merkle_bus`. 
#[derive(Clone, Debug)] pub struct PersistentBoundaryAir { pub memory_dims: MemoryDimensions, diff --git a/crates/vm/src/system/memory/volatile/mod.rs b/crates/vm/src/system/memory/volatile/mod.rs index 981624964f..e01162c789 100644 --- a/crates/vm/src/system/memory/volatile/mod.rs +++ b/crates/vm/src/system/memory/volatile/mod.rs @@ -139,7 +139,8 @@ impl Air for VolatileBoundaryAir { .eval(builder, local.is_valid); } let range_max_bits = self.range_bus().range_max_bits; - // Compose addr_space_limbs and pointer_limbs into addr_space, pointer for both local and next + // Compose addr_space_limbs and pointer_limbs into addr_space, pointer for both local and + // next let [addr_space, next_addr_space] = [&local.addr_space_limbs, &next.addr_space_limbs] .map(|limbs| compose::(limbs, range_max_bits)); let [pointer, next_pointer] = [&local.pointer_limbs, &next.pointer_limbs] @@ -153,7 +154,8 @@ impl Air for VolatileBoundaryAir { out: AB::Expr::ONE, count: next.is_valid.into(), }; - // N.B.: this will do range checks (but not other constraints) on the last row if the first row has is_valid = 1 due to wraparound + // N.B.: this will do range checks (but not other constraints) on the last row if the first + // row has is_valid = 1 due to wraparound self.addr_lt_air .eval(builder, (lt_io, (&local.addr_lt_aux).into())); @@ -214,8 +216,8 @@ impl VolatileBoundaryChip { pub fn set_overridden_height(&mut self, overridden_height: usize) { self.overridden_height = Some(overridden_height); } - /// Volatile memory requires the starting and final memory to be in equipartition with block size `1`. - /// When block size is `1`, then the `label` is the same as the address pointer. + /// Volatile memory requires the starting and final memory to be in equipartition with block + /// size `1`. When block size is `1`, then the `label` is the same as the address pointer. 
pub fn finalize(&mut self, final_memory: TimestampedEquipartition) { self.final_memory = Some(final_memory); } @@ -230,8 +232,9 @@ where } fn generate_air_proof_input(self) -> AirProofInput { - // Volatile memory requires the starting and final memory to be in equipartition with block size `1`. - // When block size is `1`, then the `label` is the same as the address pointer. + // Volatile memory requires the starting and final memory to be in equipartition with block + // size `1`. When block size is `1`, then the `label` is the same as the address + // pointer. let width = self.trace_width(); let air = Arc::new(self.air); let final_memory = self diff --git a/crates/vm/src/system/phantom/mod.rs b/crates/vm/src/system/phantom/mod.rs index 9da168c75b..28977fe2cd 100644 --- a/crates/vm/src/system/phantom/mod.rs +++ b/crates/vm/src/system/phantom/mod.rs @@ -36,7 +36,8 @@ use crate::{ mod tests; /// PhantomAir still needs columns for each nonzero operand in a phantom instruction. -/// We currently allow `a,b,c` where the lower 16 bits of `c` are used as the [PhantomInstruction] discriminant. +/// We currently allow `a,b,c` where the lower 16 bits of `c` are used as the [PhantomInstruction] +/// discriminant. const NUM_PHANTOM_OPERANDS: usize = 3; #[derive(Clone, Debug)] diff --git a/crates/vm/src/system/poseidon2/air.rs b/crates/vm/src/system/poseidon2/air.rs index 124892ca32..99769d253d 100644 --- a/crates/vm/src/system/poseidon2/air.rs +++ b/crates/vm/src/system/poseidon2/air.rs @@ -17,8 +17,8 @@ use super::columns::Poseidon2PeripheryCols; /// Poseidon2 Air, VM version. /// -/// Carries the subair for subtrace generation. Sticking to the conventions, this struct carries no state. -/// `direct` determines whether direct interactions are enabled. By default they are on. +/// Carries the subair for subtrace generation. Sticking to the conventions, this struct carries no +/// state. `direct` determines whether direct interactions are enabled. By default they are on. 
#[derive(Clone, new, Debug)] pub struct Poseidon2PeripheryAir { pub(super) subair: Arc>, diff --git a/crates/vm/src/system/poseidon2/chip.rs b/crates/vm/src/system/poseidon2/chip.rs index 8ddfa982df..e0059f1ce1 100644 --- a/crates/vm/src/system/poseidon2/chip.rs +++ b/crates/vm/src/system/poseidon2/chip.rs @@ -58,7 +58,8 @@ impl HasherChip { } impl PublicValuesCoreChip { - /// **Note:** `max_degree` is the maximum degree of the constraint polynomials to represent the flags. - /// If you want the overall AIR's constraint degree to be `<= max_constraint_degree`, then typically - /// you should set `max_degree` to `max_constraint_degree - 1`. + /// **Note:** `max_degree` is the maximum degree of the constraint polynomials to represent the + /// flags. If you want the overall AIR's constraint degree to be `<= max_constraint_degree`, + /// then typically you should set `max_degree` to `max_constraint_degree - 1`. pub fn new(num_custom_pvs: usize, max_degree: u32) -> Self { Self { air: PublicValuesCoreAir::new(num_custom_pvs, max_degree), diff --git a/crates/vm/tests/integration_test.rs b/crates/vm/tests/integration_test.rs index 42aebe4584..168d756111 100644 --- a/crates/vm/tests/integration_test.rs +++ b/crates/vm/tests/integration_test.rs @@ -132,7 +132,7 @@ fn test_vm_override_executor_height() { .unwrap(); // Memory trace heights are not computed during execution. assert_eq!( - res.internal_heights.system, + res.vm_heights.system, SystemTraceHeights { memory: MemoryTraceHeights::Volatile(VolatileMemoryTraceHeights { boundary: 1, @@ -141,7 +141,7 @@ fn test_vm_override_executor_height() { } ); assert_eq!( - res.internal_heights.inventory, + res.vm_heights.inventory, VmInventoryTraceHeights { chips: vec![ (ChipId::Executor(0), 0), @@ -209,8 +209,8 @@ fn test_vm_override_executor_height() { #[test] fn test_vm_1_optional_air() { - // Aggregation VmConfig has Core/Poseidon2/FieldArithmetic/FieldExtension chips. The program only - // uses Core and FieldArithmetic. 
All other chips should not have AIR proof inputs. + // Aggregation VmConfig has Core/Poseidon2/FieldArithmetic/FieldExtension chips. The program + // only uses Core and FieldArithmetic. All other chips should not have AIR proof inputs. let config = NativeConfig::aggregation(4, 3); let engine = BabyBearPoseidon2Engine::new(standard_fri_params_with_100_bits_conjectured_security(3)); @@ -381,8 +381,8 @@ fn test_vm_1_persistent() { merkle_air_proof_input.raw.public_values[..8], // The value when you start with zeros and repeatedly hash the value with itself // ptr_max_bits + as_height - 2 times. - // The height of the tree is ptr_max_bits + as_height - log2(8). The leaf also must be hashed once - // with padding for security. + // The height of the tree is ptr_max_bits + as_height - log2(8). The leaf also must be + // hashed once with padding for security. digest ); } @@ -490,7 +490,8 @@ fn test_vm_fibonacci_old() { #[test] fn test_vm_fibonacci_old_cycle_tracker() { - // NOTE: Instructions commented until cycle tracker instructions are not counted as additional assembly Instructions + // NOTE: Instructions commented until cycle tracker instructions are not counted as additional + // assembly Instructions let instructions = vec![ Instruction::debug(PhantomDiscriminant(SysPhantom::CtStart as u16)), Instruction::debug(PhantomDiscriminant(SysPhantom::CtStart as u16)), diff --git a/docs/crates/benchmarks.md b/docs/crates/benchmarks.md index 545565698f..1f51a16681 100644 --- a/docs/crates/benchmarks.md +++ b/docs/crates/benchmarks.md @@ -1,6 +1,6 @@ # Benchmarks -Documentation for the `openvm-benchmarks` crate. By default, paths will be referenced from the [`benchmarks`](../../benchmarks) directory. +Documentation for the `openvm-benchmarks-*` crates. By default, paths will be referenced from the [`benchmarks`](../../benchmarks) directory. 
- Table of Contents - [Latest Benchmark Results](#latest-benchmark-results) @@ -16,7 +16,7 @@ These are run via [github workflows](../../.github/workflows/benchmarks.yml) and ## How to Add a Benchmark -1. Add a new crate to the [programs](../../benchmarks/programs/) directory. +1. Add a new crate to the [guest](../../benchmarks/guest/) directory. 2. Add the [benchmark to CI](#adding-a-benchmark-to-ci). This is called a "guest program" because it is intended to be run on the OpenVM architecture and @@ -36,21 +36,21 @@ To support host machine execution, the top of your guest program should have: #![cfg_attr(not(feature = "std"), no_std)] ``` -You can copy from [fibonacci](../../benchmarks/programs/fibonacci) to get started. +You can copy from [fibonacci](../../benchmarks/guest/fibonacci) to get started. The guest program crate should **not** be included in the main repository workspace. Instead the guest `Cargo.toml` should have `[workspace]` at the top to keep it standalone. Your IDE will likely not lint or use rust-analyzer on the crate while in the workspace, so the recommended setup is to open a separate IDE workspace from the directory of the guest program. ### Adding the Benchmark -Our proving benchmarks are written as standalone rust binaries. Add one by making a new file in [bin](../../benchmarks/src/bin) by following the [fibonacci example](../../benchmarks/src/bin/fibonacci.rs). We currently only run aggregation proofs when feature "aggregation" is on (off by default). Any general benchmarking utility functions can be added to the library in [`src`](../../benchmarks/src). There are utility functions `build_bench_program` which compiles the guest program crate with target set to `openvm` and reads the output RISC-V ELF file. +Our proving benchmarks are written as standalone rust binaries. Add one by making a new file in [bin](../../benchmarks/prove/src/bin) by following the [fibonacci example](../../benchmarks/prove/src/bin/fibonacci.rs). 
We currently only run aggregation proofs when feature "aggregation" is on (off by default). Any general benchmarking utility functions can be added to the library in [`src`](../../benchmarks/utils/src). There are utility functions `build_bench_program` which compiles the guest program crate with target set to `openvm` and reads the output RISC-V ELF file. This can then be fed into `bench_from_exe` which will generate a proof of the execution of the ELF (any other `VmExe`) from a given `VmConfig`. #### Providing Inputs Inputs must be directly provided to the `bench_from_exe` function: the `input_stream: Vec>` is a vector of vectors, where `input_stream[i]` will be what is provided to the guest program on the `i`-th call of `openvm::io::read_vec()`. Currently you must manually convert from `u8` to `F` using `FieldAlgebra::from_canonical_u8`. -You can find an example of passing in a single `Vec` input in [base64_json](../../benchmarks/src/bin/base64_json.rs). +You can find an example of passing in a single `Vec` input in [base64_json](../../benchmarks/prove/src/bin/base64_json.rs). #### Testing the Guest Program @@ -89,7 +89,7 @@ Running a benchmark locally is simple. Just run the following command: OUTPUT_PATH="metrics.json" cargo run --release --bin ``` -where `.rs` is one of the files in [`src/bin`](../../benchmarks/src/bin). +where `.rs` is one of the files in [`src/bin`](../../benchmarks/prove/src/bin). The `OUTPUT_PATH` environmental variable should be set to the file path where you want the collected metrics to be written to. If unset, then metrics are not printed to file. To run a benchmark with the leaf aggregation, add `--features aggregation` to the above command. 
@@ -151,13 +151,58 @@ To add the benchmark to CI, update the [ci/benchmark-config.json](../../ci/bench The `benchmarks.yml` file reads this JSON and generates a matrix of inputs for the [.github/workflows/benchmark-call.yml](../../.github/workflows/benchmark-call.yml) file, a reusable workflow for running the benchmark, collecting metrics, and storing and displaying results. +## Execution Benchmarks + +The crate [`openvm-benchmarks-execute`](../../benchmarks/execute) contains benchmarks for measuring the raw VM execution performance without proving. It includes a CLI tool that allows running various pre-defined benchmark programs to evaluate execution time. Note that this tool doesn't compile the guest ELF files and requires them to be precompiled before running the benchmarks. + +### Using the CLI + +The CLI provides several options for running execution benchmarks: + +```bash +# Run all benchmark programs +cargo run --package openvm-benchmarks-execute + +# List all available benchmark programs +cargo run --package openvm-benchmarks-execute -- --list + +# Run specific benchmark programs +cargo run --package openvm-benchmarks-execute -- --programs fibonacci_recursive fibonacci_iterative + +# Run all benchmark programs except specified ones +cargo run --package openvm-benchmarks-execute -- --skip keccak256 sha256 +``` + +These benchmarks measure pure execution time without proving, making them useful for isolating performance bottlenecks in the VM runtime itself. + +### Updating the ELFs + +For execution benchmarks, the ELF files need to be compiled before running the benchmarks. 
The [`openvm-benchmarks-utils`](../../benchmarks/utils) crate provides a CLI tool to build all the benchmark ELFs: + +```bash +# Build all benchmark ELFs +cargo run --package openvm-benchmarks-utils --bin build-elfs --features build-binaries + +# Build specific benchmark ELFs +cargo run --package openvm-benchmarks-utils --bin build-elfs --features build-binaries -- fibonacci_recursive fibonacci_iterative + +# Skip specific programs +cargo run --package openvm-benchmarks-utils --bin build-elfs --features build-binaries -- --skip keccak256 sha256 + +# Force rebuild even if ELFs already exist (overwrite) +cargo run --package openvm-benchmarks-utils --bin build-elfs --features build-binaries -- --force + +# Set build profile (debug or release) +cargo run --package openvm-benchmarks-utils --bin build-elfs --features build-binaries -- --profile debug +``` + ## Profiling Execution The following section discusses traditional profiling of the VM runtime execution, without ZK proving. ### Criterion Benchmarks -Most benchmarks are binaries that run once since proving benchmarks take longer. For smaller benchmarks, such as to benchmark VM runtime, we use Criterion. These are in the [`benches`](../../benchmarks/benches) directory. +Most benchmarks are binaries that run once since proving benchmarks take longer. For smaller benchmarks, such as to benchmark VM runtime, we use Criterion. These are in the [`benches`](../../benchmarks/execute/benches) directory. ```bash cargo bench --bench fibonacci_execute @@ -166,7 +211,7 @@ cargo bench --bench regex_execute will run the normal criterion benchmark. -We profile using executables without criterion in [`examples`](../../benchmarks/examples). To prevent the ELF build time from being included in the benchmark, we pre-build the ELF using the CLI. Check that the included ELF file in `examples` is up to date before proceeding. +We profile using executables without criterion in [`examples`](../../benchmarks/execute/examples). 
To prevent the ELF build time from being included in the benchmark, we pre-build the ELF using the CLI. Check that the included ELF file in `examples` is up to date before proceeding. ### Flamegraph diff --git a/docs/specs/memory.md b/docs/specs/memory.md index df8fd59931..3d8ea1dc1d 100644 --- a/docs/specs/memory.md +++ b/docs/specs/memory.md @@ -97,7 +97,7 @@ The _split_ and _merge_ operations for a subsegment of length `N` are handled by - Of course, merging two halves creates a subsegment with the timestamp being the maximal of two timestamps for the halves. - Splitting, however, just makes all child timestamps equal to the former timestamp of the segment being split. - All these timestamp conditions are checked in the `AccessAdapterAir`. -- When merging two segments `[l, r)` and `[m, r)`, the `AccessAdapterAir` sends to **memory bus** the information about `[l, r)` and receives the information about `[l, m)` and about `[m, r)`, all with multiplicity 1. Splitting does the same, but with multiplicity -1 (or, in other words, receives about `[l, r)` and sends about `[l, m)` and `[m, r)`). +- When merging two segments `[l, m)` and `[m, r)`, the `AccessAdapterAir` sends to **memory bus** the information about `[l, r)` and receives the information about `[l, m)` and about `[m, r)`, all with multiplicity 1. Splitting does the same, but with multiplicity -1 (or, in other words, receives about `[l, r)` and sends about `[l, m)` and `[m, r)`). 
- The information about `[l, r)` sent to the bus is, in this order: - address space, - `l`, diff --git a/extensions/algebra/circuit/src/config.rs b/extensions/algebra/circuit/src/config.rs index a47d2ce171..3f2e83fef3 100644 --- a/extensions/algebra/circuit/src/config.rs +++ b/extensions/algebra/circuit/src/config.rs @@ -1,10 +1,6 @@ -use derive_more::derive::From; use num_bigint::BigUint; -use openvm_circuit::arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, VmConfig, VmInventoryError, -}; -use openvm_circuit_derive::{AnyEnum, InstructionExecutor, VmConfig}; -use openvm_circuit_primitives_derive::{Chip, ChipUsageGetter}; +use openvm_circuit::arch::SystemConfig; +use openvm_circuit_derive::VmConfig; use openvm_rv32im_circuit::*; use openvm_stark_backend::p3_field::PrimeField32; use serde::{Deserialize, Serialize}; diff --git a/extensions/algebra/circuit/src/fp2.rs b/extensions/algebra/circuit/src/fp2.rs index 9cdf5663e5..48fcd535d5 100644 --- a/extensions/algebra/circuit/src/fp2.rs +++ b/extensions/algebra/circuit/src/fp2.rs @@ -2,7 +2,8 @@ use std::{cell::RefCell, rc::Rc}; use openvm_mod_circuit_builder::{ExprBuilder, FieldVariable, SymbolicExpr}; -/// Quadratic field extension of `Fp` defined by `Fp2 = Fp[u]/(1 + u^2)`. Assumes that `-1` is not a quadratic residue in `Fp`, which is equivalent to `p` being congruent to `3 (mod 4)`. +/// Quadratic field extension of `Fp` defined by `Fp2 = Fp[u]/(1 + u^2)`. Assumes that `-1` is not a +/// quadratic residue in `Fp`, which is equivalent to `p` being congruent to `3 (mod 4)`. /// Extends Mod Builder to work with Fp2 variables. #[derive(Clone)] pub struct Fp2 { @@ -91,8 +92,9 @@ impl Fp2 { // (1) x0 = y0*z0 - y1*z1 and // (2) x1 = y1*z0 + y0*z1 // which implies z0 and z1 are computed as above. - // Observe (1)*y0 + (2)*y1 yields x0*y0 + x1*y1 = z0(y0^2 + y1^2) and so z0 = (x0*y0 + x1*y1) / (y0^2 + y1^2) as needed. 
- // Observe (1)*(-y1) + (2)*y0 yields x1*y0 - x0*y1 = z1(y0^2 + y1^2) and so z1 = (x1*y0 - x0*y1) / (y0^2 + y1^2) as needed. + // Observe (1)*y0 + (2)*y1 yields x0*y0 + x1*y1 = z0(y0^2 + y1^2) and so z0 = (x0*y0 + + // x1*y1) / (y0^2 + y1^2) as needed. Observe (1)*(-y1) + (2)*y0 yields x1*y0 - x0*y1 + // = z1(y0^2 + y1^2) and so z1 = (x1*y0 - x0*y1) / (y0^2 + y1^2) as needed. // Constraint 1: x0 = y0*z0 - y1*z1 let constraint1 = &self.c0.expr - &other.c0.expr * &fake_z0 + &other.c1.expr * &fake_z1; diff --git a/extensions/algebra/circuit/src/modular_chip/is_eq.rs b/extensions/algebra/circuit/src/modular_chip/is_eq.rs index 854ba1e5ea..fe91585466 100644 --- a/extensions/algebra/circuit/src/modular_chip/is_eq.rs +++ b/extensions/algebra/circuit/src/modular_chip/is_eq.rs @@ -47,11 +47,11 @@ pub struct ModularIsEqualCoreCols { // Define c_diff_idx analogously. Then let b_lt_diff = N[b_diff_idx] - b[b_diff_idx] and // c_lt_diff = N[c_diff_idx] - c[c_diff_idx], where both must be in [0, 2^LIMB_BITS). // - // To constrain the above, we will use lt_marker, which will indicate where b_diff_idx and c_diff_idx are. - // Set lt_marker[b_diff_idx] = 1, lt_marker[c_diff_idx] = c_lt_mark, and 0 everywhere - // else. If b_diff_idx == c_diff_idx then c_lt_mark = 1, else c_lt_mark = 2. The purpose of - // c_lt_mark is to handle the edge case where b_diff_idx == c_diff_idx (because we cannot set - // lt_marker[b_diff_idx] to 1 and 2 at the same time). + // To constrain the above, we will use lt_marker, which will indicate where b_diff_idx and + // c_diff_idx are. Set lt_marker[b_diff_idx] = 1, lt_marker[c_diff_idx] = c_lt_mark, and 0 + // everywhere else. If b_diff_idx == c_diff_idx then c_lt_mark = 1, else c_lt_mark = 2. The + // purpose of c_lt_mark is to handle the edge case where b_diff_idx == c_diff_idx (because + // we cannot set lt_marker[b_diff_idx] to 1 and 2 at the same time). 
pub lt_marker: [T; READ_LIMBS], pub b_lt_diff: T, pub c_lt_diff: T, @@ -204,16 +204,20 @@ where // Constrain b < N. // First, we constrain b[i] = N[i] for i > b_diff_idx. - // We do this by constraining that b[i] = N[i] when prefix_sum is not 1 or lt_marker_sum. - // - If is_setup = 0, then lt_marker_sum is either 1 or 3. In this case, prefix_sum is 0, 1, 2, or 3. - // It can be verified by casework that i > b_diff_idx iff prefix_sum is not 1 or lt_marker_sum. - // - If is_setup = 1, then we want to constrain b[i] = N[i] for all i. In this case, lt_marker_sum is 2 - // and prefix_sum is 0 or 2. So we constrain b[i] = N[i] when prefix_sum is not 1, which works. + // We do this by constraining that b[i] = N[i] when prefix_sum is not 1 or + // lt_marker_sum. + // - If is_setup = 0, then lt_marker_sum is either 1 or 3. In this case, prefix_sum is + // 0, 1, 2, or 3. It can be verified by casework that i > b_diff_idx iff prefix_sum + // is not 1 or lt_marker_sum. + // - If is_setup = 1, then we want to constrain b[i] = N[i] for all i. In this case, + // lt_marker_sum is 2 and prefix_sum is 0 or 2. So we constrain b[i] = N[i] when + // prefix_sum is not 1, which works. builder .when_ne(prefix_sum.clone(), AB::F::ONE) .when_ne(prefix_sum.clone(), lt_marker_sum.clone() - cols.is_setup) .assert_eq(cols.b[i], modulus[i]); - // Note that lt_marker[i] is either 0, 1, or 2 and lt_marker[i] being 1 indicates b[i] < N[i] (i.e. i == b_diff_idx). + // Note that lt_marker[i] is either 0, 1, or 2 and lt_marker[i] being 1 indicates b[i] < + // N[i] (i.e. i == b_diff_idx). builder .when_ne(cols.lt_marker[i], AB::F::ZERO) .when_ne(cols.lt_marker[i], AB::F::from_canonical_u8(2)) @@ -221,14 +225,16 @@ where // Constrain c < N. // First, we constrain c[i] = N[i] for i > c_diff_idx. - // We do this by constraining that c[i] = N[i] when prefix_sum is not c_lt_mark or lt_marker_sum. - // It can be verified by casework that i > c_diff_idx iff prefix_sum is not c_lt_mark or lt_marker_sum. 
+ // We do this by constraining that c[i] = N[i] when prefix_sum is not c_lt_mark or + // lt_marker_sum. It can be verified by casework that i > c_diff_idx iff + // prefix_sum is not c_lt_mark or lt_marker_sum. builder .when_ne(prefix_sum.clone(), cols.c_lt_mark) .when_ne(prefix_sum.clone(), lt_marker_sum.clone()) .assert_eq(cols.c[i], modulus[i]); - // Note that lt_marker[i] is either 0, 1, or 2 and lt_marker[i] being c_lt_mark indicates c[i] < N[i] (i.e. i == c_diff_idx). - // Since c_lt_mark is 1 or 2, we have {0, 1, 2} \ {0, 3 - c_lt_mark} = {c_lt_mark}. + // Note that lt_marker[i] is either 0, 1, or 2 and lt_marker[i] being c_lt_mark + // indicates c[i] < N[i] (i.e. i == c_diff_idx). Since c_lt_mark is 1 or 2, + // we have {0, 1, 2} \ {0, 3 - c_lt_mark} = {c_lt_mark}. builder .when_ne(cols.lt_marker[i], AB::F::ZERO) .when_ne( diff --git a/extensions/algebra/circuit/src/modular_chip/muldiv.rs b/extensions/algebra/circuit/src/modular_chip/muldiv.rs index fe5ca49cdd..30f063e2b1 100644 --- a/extensions/algebra/circuit/src/modular_chip/muldiv.rs +++ b/extensions/algebra/circuit/src/modular_chip/muldiv.rs @@ -33,7 +33,8 @@ pub fn muldiv_expr( // constraint is x * y = z, or z * y = x let lvar = FieldVariable::select(is_mul_flag, &x, &z); let rvar = FieldVariable::select(is_mul_flag, &z, &x); - // When it's SETUP op, x = p == 0, y = 0, both flags are false, and it still works: z * 0 - x = 0, whatever z is. + // When it's SETUP op, x = p == 0, y = 0, both flags are false, and it still works: z * 0 - x = + // 0, whatever z is. 
let constraint = lvar * y.clone() - rvar; builder.borrow_mut().set_constraint(z_idx, constraint.expr); let compute = SymbolicExpr::Select( diff --git a/extensions/algebra/circuit/src/modular_chip/tests.rs b/extensions/algebra/circuit/src/modular_chip/tests.rs index bc3520cd76..1ad3310f76 100644 --- a/extensions/algebra/circuit/src/modular_chip/tests.rs +++ b/extensions/algebra/circuit/src/modular_chip/tests.rs @@ -462,7 +462,8 @@ where F::from_canonical_u32(self.chip.air.modulus_limbs[READ_LIMBS - 1]) - record.b[READ_LIMBS - 1]; } else if record.b[0] == F::from_canonical_u32(2) { - // test the constraint that b[i] = N[i] for all i when prefix_sum is not 1 or lt_marker_sum - is_setup + // test the constraint that b[i] = N[i] for all i when prefix_sum is not 1 or + // lt_marker_sum - is_setup row_slice.c_lt_mark = F::from_canonical_u8(2); row_slice.lt_marker = [F::ZERO; READ_LIMBS]; row_slice.lt_marker[READ_LIMBS - 1] = F::from_canonical_u8(2); @@ -499,7 +500,8 @@ fn test_is_equal_setup_bad< >( opcode_offset: usize, modulus: BigUint, - b_val: u32, // used to select which bug to test. currently only 1, 2, and 3 are supported (because there are three bugs to test) + b_val: u32, /* used to select which bug to test. 
currently only 1, 2, and 3 are supported + * (because there are three bugs to test) */ ) { let bitwise_bus = BitwiseOperationLookupBus::new(BITWISE_OP_LOOKUP_BUS); let bitwise_chip = SharedBitwiseOperationLookupChip::::new(bitwise_bus); diff --git a/extensions/algebra/complex-macros/src/lib.rs b/extensions/algebra/complex-macros/src/lib.rs index 884f819d24..d829abeeed 100644 --- a/extensions/algebra/complex-macros/src/lib.rs +++ b/extensions/algebra/complex-macros/src/lib.rs @@ -519,8 +519,9 @@ pub fn complex_declare(input: TokenStream) -> TokenStream { /// /// complex_init!(Complex2 { mod_idx = 1 }, Complex1 { mod_idx = 0 }); /// ``` -/// In particular, the order of complex types in the macro doesn't have to match the order of moduli in `moduli_init!`, -/// but they should be accompanied by the `mod_idx` corresponding to the order in the `moduli_init!` macro (not `moduli_declare!`). +/// In particular, the order of complex types in the macro doesn't have to match the order of moduli +/// in `moduli_init!`, but they should be accompanied by the `mod_idx` corresponding to the order in +/// the `moduli_init!` macro (not `moduli_declare!`). #[proc_macro] pub fn complex_init(input: TokenStream) -> TokenStream { let MacroArgs { items } = parse_macro_input!(input as MacroArgs); diff --git a/extensions/algebra/guest/src/field/mod.rs b/extensions/algebra/guest/src/field/mod.rs index d9d0d678e8..326bd08838 100644 --- a/extensions/algebra/guest/src/field/mod.rs +++ b/extensions/algebra/guest/src/field/mod.rs @@ -6,7 +6,8 @@ use core::{ use crate::{DivAssignUnsafe, DivUnsafe}; -// TODO[jpw]: the shared parts of Field and IntMod should be moved into a new `IntegralDomain` trait. +// TODO[jpw]: the shared parts of Field and IntMod should be moved into a new `IntegralDomain` +// trait. /// This is a simplified trait for field elements. pub trait Field: Sized @@ -62,7 +63,8 @@ pub trait Field: pub trait FieldExtension { /// Extension field degree. 
const D: usize; - /// This should be [BaseField; D]. It is an associated type due to rust const generic limitations. + /// This should be [BaseField; D]. It is an associated type due to rust const generic + /// limitations. type Coeffs: Sized; /// Create an extension field element from its base field coefficients. @@ -80,7 +82,8 @@ pub trait FieldExtension { /// Embed a base field element into an extension field element. fn embed(base_elem: BaseField) -> Self; - /// Frobenius map: take `self` to the `p^power`th power, where `p` is the prime characteristic of the field. + /// Frobenius map: take `self` to the `p^power`th power, where `p` is the prime characteristic + /// of the field. fn frobenius_map(&self, power: usize) -> Self; /// Multiply an extension field element by an element in the base field diff --git a/extensions/algebra/guest/src/halo2curves.rs b/extensions/algebra/guest/src/halo2curves.rs index 6571b1e98e..a9a6513ac6 100644 --- a/extensions/algebra/guest/src/halo2curves.rs +++ b/extensions/algebra/guest/src/halo2curves.rs @@ -123,7 +123,8 @@ mod bn254 { } } - /// FieldExtension for Fq12 with Fq6 as base field since halo2curves does not implement `Field` for Fq6. + /// FieldExtension for Fq12 with Fq6 as base field since halo2curves does not implement `Field` + /// for Fq6. impl FieldExtension for Fq12 { const D: usize = 6; type Coeffs = [Fq2; 6]; @@ -289,9 +290,11 @@ mod bls12_381 { } } - /// Note that halo2curves does not implement `Field` for Fq6, so we need to implement the intermediate points manually. + /// Note that halo2curves does not implement `Field` for Fq6, so we need to implement the + /// intermediate points manually. /// - /// FieldExtension for Fq12 with Fq2 as base field since halo2curves does not implement `Field` for Fq6. + /// FieldExtension for Fq12 with Fq2 as base field since halo2curves does not implement `Field` + /// for Fq6. 
impl FieldExtension for Fq12 { const D: usize = 6; type Coeffs = [Fq2; 6]; diff --git a/extensions/algebra/guest/src/lib.rs b/extensions/algebra/guest/src/lib.rs index 27c78bf42e..1d778f0013 100644 --- a/extensions/algebra/guest/src/lib.rs +++ b/extensions/algebra/guest/src/lib.rs @@ -118,7 +118,8 @@ pub trait IntMod: { /// Underlying representation of IntMod. Usually of the form `[u8; NUM_LIMBS]`. type Repr: AsRef<[u8]> + AsMut<[u8]>; - /// `SelfRef<'a>` should almost always be `&'a Self`. This is a way to include implementations of binary operations where both sides are `&'a Self`. + /// `SelfRef<'a>` should almost always be `&'a Self`. This is a way to include implementations + /// of binary operations where both sides are `&'a Self`. type SelfRef<'a>: Add<&'a Self, Output = Self> + Sub<&'a Self, Output = Self> + Neg diff --git a/extensions/algebra/moduli-macros/src/lib.rs b/extensions/algebra/moduli-macros/src/lib.rs index 78e779a3f5..5d8d921f2f 100644 --- a/extensions/algebra/moduli-macros/src/lib.rs +++ b/extensions/algebra/moduli-macros/src/lib.rs @@ -12,15 +12,16 @@ use syn::{ static MOD_IDX: AtomicUsize = AtomicUsize::new(0); -/// This macro generates the code to setup the modulus for a given prime. Also it places the moduli into a special static variable to be later extracted from the ELF and used by the VM. -/// Usage: +/// This macro generates the code to setup the modulus for a given prime. Also it places the moduli +/// into a special static variable to be later extracted from the ELF and used by the VM. Usage: /// ``` /// moduli_declare! { /// Bls12381 { modulus = "0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab" }, /// Bn254 { modulus = "21888242871839275222246405745257275088696311157297823662689037894645226208583" }, /// } /// ``` -/// This creates two structs, `Bls12381` and `Bn254`, each representing the modular arithmetic class (implementing `Add`, `Sub` and so on). 
+/// This creates two structs, `Bls12381` and `Bn254`, each representing the modular arithmetic class +/// (implementing `Add`, `Sub` and so on). #[proc_macro] pub fn moduli_declare(input: TokenStream) -> TokenStream { let MacroArgs { items } = parse_macro_input!(input as MacroArgs); @@ -783,7 +784,8 @@ pub fn moduli_init(input: TokenStream) -> TokenStream { let serialized_modulus = core::iter::once(1) // 1 for "modulus" - .chain(core::iter::once(mod_idx as u8)) // mod_idx is u8 for now (can make it u32), because we don't know the order of variables in the elf + .chain(core::iter::once(mod_idx as u8)) // mod_idx is u8 for now (can make it u32), because we don't know the order of + // variables in the elf .chain((modulus_bytes.len() as u32).to_le_bytes().iter().copied()) .chain(modulus_bytes.iter().copied()) .collect::>(); diff --git a/extensions/bigint/circuit/Cargo.toml b/extensions/bigint/circuit/Cargo.toml index 3b96f60a3a..09d68a9d1b 100644 --- a/extensions/bigint/circuit/Cargo.toml +++ b/extensions/bigint/circuit/Cargo.toml @@ -31,7 +31,7 @@ openvm-circuit = { workspace = true, features = ["test-utils"] } openvm-rv32-adapters = { workspace = true, features = ["test-utils"] } [features] -default = ["parallel", "mimalloc"] +default = ["parallel", "jemalloc"] parallel = ["openvm-circuit/parallel"] test-utils = ["openvm-circuit/test-utils"] # performance features: diff --git a/extensions/bigint/circuit/src/extension.rs b/extensions/bigint/circuit/src/extension.rs index 8977d3ff47..b9eeeafd99 100644 --- a/extensions/bigint/circuit/src/extension.rs +++ b/extensions/bigint/circuit/src/extension.rs @@ -5,8 +5,7 @@ use openvm_bigint_transpiler::{ }; use openvm_circuit::{ arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, SystemPort, VmChipComplex, VmConfig, - VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, + SystemConfig, SystemPort, VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, }, system::phantom::PhantomChip, }; diff --git 
a/extensions/ecc/circuit/src/config.rs b/extensions/ecc/circuit/src/config.rs index 91fa25eb91..ab8809aa6d 100644 --- a/extensions/ecc/circuit/src/config.rs +++ b/extensions/ecc/circuit/src/config.rs @@ -1,10 +1,6 @@ -use derive_more::derive::From; use openvm_algebra_circuit::*; -use openvm_circuit::arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, VmConfig, VmInventoryError, -}; -use openvm_circuit_derive::{AnyEnum, InstructionExecutor, VmConfig}; -use openvm_circuit_primitives_derive::{Chip, ChipUsageGetter}; +use openvm_circuit::arch::SystemConfig; +use openvm_circuit_derive::VmConfig; use openvm_rv32im_circuit::*; use openvm_stark_backend::p3_field::PrimeField32; use serde::{Deserialize, Serialize}; diff --git a/extensions/ecc/circuit/src/weierstrass_chip/mod.rs b/extensions/ecc/circuit/src/weierstrass_chip/mod.rs index 1738c69556..0bcee1facf 100644 --- a/extensions/ecc/circuit/src/weierstrass_chip/mod.rs +++ b/extensions/ecc/circuit/src/weierstrass_chip/mod.rs @@ -23,8 +23,8 @@ use openvm_stark_backend::p3_field::PrimeField32; /// BLOCK_SIZE: how many cells do we read at a time, must be a power of 2. /// BLOCKS: how many blocks do we need to represent one input or output -/// For example, for bls12_381, BLOCK_SIZE = 16, each element has 3 blocks and with two elements per input AffinePoint, BLOCKS = 6. -/// For secp256k1, BLOCK_SIZE = 32, BLOCKS = 2. +/// For example, for bls12_381, BLOCK_SIZE = 16, each element has 3 blocks and with two elements per +/// input AffinePoint, BLOCKS = 6. For secp256k1, BLOCK_SIZE = 32, BLOCKS = 2. 
#[derive(Chip, ChipUsageGetter, InstructionExecutor)] pub struct EcAddNeChip( pub VmChipWrapper< diff --git a/extensions/ecc/circuit/src/weierstrass_chip/tests.rs b/extensions/ecc/circuit/src/weierstrass_chip/tests.rs index 751d351200..213918ec2e 100644 --- a/extensions/ecc/circuit/src/weierstrass_chip/tests.rs +++ b/extensions/ecc/circuit/src/weierstrass_chip/tests.rs @@ -192,7 +192,8 @@ fn test_double() { let a_limbs = [BabyBear::ZERO; NUM_LIMBS]; let setup_instruction = rv32_write_heap_default( &mut tester, - vec![prime_limbs, a_limbs], // inputs[0] = prime, inputs[1] = a coeff of weierstrass equation + vec![prime_limbs, a_limbs], /* inputs[0] = prime, inputs[1] = a coeff of weierstrass + * equation */ vec![], chip.0.core.air.offset + Rv32WeierstrassOpcode::SETUP_EC_DOUBLE as usize, ); @@ -280,7 +281,8 @@ fn test_p256_double() { biguint_to_limbs::(a.clone(), LIMB_BITS).map(BabyBear::from_canonical_u32); let setup_instruction = rv32_write_heap_default( &mut tester, - vec![prime_limbs, a_limbs], // inputs[0] = prime, inputs[1] = a coeff of weierstrass equation + vec![prime_limbs, a_limbs], /* inputs[0] = prime, inputs[1] = a coeff of weierstrass + * equation */ vec![], chip.0.core.air.offset + Rv32WeierstrassOpcode::SETUP_EC_DOUBLE as usize, ); diff --git a/extensions/ecc/guest/src/weierstrass.rs b/extensions/ecc/guest/src/weierstrass.rs index 81672fbfb4..e1783e429d 100644 --- a/extensions/ecc/guest/src/weierstrass.rs +++ b/extensions/ecc/guest/src/weierstrass.rs @@ -82,7 +82,8 @@ pub trait FromCompressed { /// Decompresses a point from its x-coordinate and a recovery identifier which indicates /// the parity of the y-coordinate. Given the x-coordinate, this function attempts to find the /// corresponding y-coordinate that satisfies the elliptic curve equation. If successful, it - /// returns the point as an instance of Self. If the point cannot be decompressed, it returns None. + /// returns the point as an instance of Self. 
If the point cannot be decompressed, it returns + /// None. fn decompress(x: Coordinate, rec_id: &u8) -> Option where Self: core::marker::Sized; @@ -100,8 +101,9 @@ pub trait FromCompressed { fn hint_decompress(x: &Coordinate, rec_id: &u8) -> Option>; } -/// A trait for elliptic curves that bridges the openvm types and external types with CurveArithmetic etc. -/// Implement this for external curves with corresponding openvm point and scalar types. +/// A trait for elliptic curves that bridges the openvm types and external types with +/// CurveArithmetic etc. Implement this for external curves with corresponding openvm point and +/// scalar types. pub trait IntrinsicCurve { type Scalar: Clone; type Point: Clone; @@ -115,7 +117,8 @@ pub trait IntrinsicCurve { // MSM using preprocessed table (windowed method) // Reference: modified from https://github.com/arkworks-rs/algebra/blob/master/ec/src/scalar_mul/mod.rs // -// We specialize to Weierstrass curves and further make optimizations for when the curve order is prime. +// We specialize to Weierstrass curves and further make optimizations for when the curve order is +// prime. /// Cached precomputations of scalar multiples of several base points. /// - `window_bits` is the window size used for the precomputation @@ -232,19 +235,20 @@ where } /// Macro to generate a newtype wrapper for [AffinePoint](crate::AffinePoint) -/// that implements elliptic curve operations by using the underlying field operations according to the -/// [formulas](https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html) for short Weierstrass curves. +/// that implements elliptic curve operations by using the underlying field operations according to +/// the [formulas](https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html) for short Weierstrass curves. 
/// /// The following imports are required: /// ```rust /// use core::ops::AddAssign; /// /// use openvm_algebra_guest::{DivUnsafe, Field}; -/// use openvm_ecc_guest::{AffinePoint, Group, weierstrass::WeierstrassPoint}; +/// use openvm_ecc_guest::{weierstrass::WeierstrassPoint, AffinePoint, Group}; /// ``` #[macro_export] macro_rules! impl_sw_affine { - // Assumes `a = 0` in curve equation. `$three` should be a constant expression for `3` of type `$field`. + // Assumes `a = 0` in curve equation. `$three` should be a constant expression for `3` of type + // `$field`. ($struct_name:ident, $field:ty, $three:expr, $b:expr) => { /// A newtype wrapper for [AffinePoint] that implements elliptic curve operations /// by using the underlying field operations according to the [formulas](https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html) for short Weierstrass curves. diff --git a/extensions/ecc/sw-macros/src/lib.rs b/extensions/ecc/sw-macros/src/lib.rs index dbf8117bac..c739b1965c 100644 --- a/extensions/ecc/sw-macros/src/lib.rs +++ b/extensions/ecc/sw-macros/src/lib.rs @@ -8,15 +8,17 @@ use syn::{ parse_macro_input, Expr, ExprPath, Path, Token, }; -/// This macro generates the code to setup the elliptic curve for a given modular type. Also it places the curve parameters into a special static variable to be later extracted from the ELF and used by the VM. -/// Usage: +/// This macro generates the code to setup the elliptic curve for a given modular type. Also it +/// places the curve parameters into a special static variable to be later extracted from the ELF +/// and used by the VM. Usage: /// ``` /// sw_declare! { /// [TODO] /// } /// ``` /// -/// For this macro to work, you must import the `elliptic_curve` crate and the `openvm_ecc_guest` crate. +/// For this macro to work, you must import the `elliptic_curve` crate and the `openvm_ecc_guest` +/// crate. 
#[proc_macro] pub fn sw_declare(input: TokenStream) -> TokenStream { let MacroArgs { items } = parse_macro_input!(input as MacroArgs); @@ -45,11 +47,13 @@ pub fn sw_declare(input: TokenStream) -> TokenStream { } } "a" => { - // We currently leave it to the compiler to check if the expression is actually a constant + // We currently leave it to the compiler to check if the expression is actually + // a constant const_a = Some(param.value); } "b" => { - // We currently leave it to the compiler to check if the expression is actually a constant + // We currently leave it to the compiler to check if the expression is actually + // a constant const_b = Some(param.value); } _ => { diff --git a/extensions/ecc/tests/programs/examples/decompress.rs b/extensions/ecc/tests/programs/examples/decompress.rs index f47b7a05e0..6f549d311c 100644 --- a/extensions/ecc/tests/programs/examples/decompress.rs +++ b/extensions/ecc/tests/programs/examples/decompress.rs @@ -138,8 +138,9 @@ fn test_possible_decompression { /// dst <- \[dst_ptr:4\]_1 pub dst: [T; RV32_REGISTER_NUM_LIMBS], /// src <- \[src_ptr:4\]_1 - /// We store src_limbs\[i\] = \[src_ptr + i + 1\]_1 and src = u32(\[src_ptr:4\]_1) from which \[src_ptr\]_1 - /// can be recovered by linear combination. + /// We store src_limbs\[i\] = \[src_ptr + i + 1\]_1 and src = u32(\[src_ptr:4\]_1) from which + /// \[src_ptr\]_1 can be recovered by linear combination. /// We do this because `src` needs to be incremented between keccak-f permutations. pub src_limbs: [T; RV32_REGISTER_NUM_LIMBS - 1], pub src: T, @@ -97,8 +97,8 @@ pub struct KeccakMemoryCols { pub register_aux: [MemoryReadAuxCols; KECCAK_REGISTER_READS], pub absorb_reads: [MemoryReadAuxCols; KECCAK_ABSORB_READS], pub digest_writes: [MemoryWriteAuxCols; KECCAK_DIGEST_WRITES], - /// The input bytes are batch read in blocks of private constant KECCAK_WORD_SIZE bytes. 
However - /// if the input length is not a multiple of KECCAK_WORD_SIZE, we read into + /// The input bytes are batch read in blocks of private constant KECCAK_WORD_SIZE bytes. + /// However if the input length is not a multiple of KECCAK_WORD_SIZE, we read into /// `partial_block` more bytes than we need. On the other hand `block_bytes` expects /// only the partial block of bytes and then the correctly padded bytes. /// We will select between `partial_block` and `block_bytes` for what to read from memory. diff --git a/extensions/keccak256/circuit/src/extension.rs b/extensions/keccak256/circuit/src/extension.rs index 7f7749e53d..d24681fb55 100644 --- a/extensions/keccak256/circuit/src/extension.rs +++ b/extensions/keccak256/circuit/src/extension.rs @@ -1,8 +1,7 @@ use derive_more::derive::From; use openvm_circuit::{ arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, SystemPort, VmChipComplex, VmConfig, - VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, + SystemConfig, SystemPort, VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, }, system::phantom::PhantomChip, }; diff --git a/extensions/keccak256/circuit/src/lib.rs b/extensions/keccak256/circuit/src/lib.rs index a0640054ab..c9fd1c9f5a 100644 --- a/extensions/keccak256/circuit/src/lib.rs +++ b/extensions/keccak256/circuit/src/lib.rs @@ -92,7 +92,8 @@ pub struct KeccakRecord { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct KeccakInputBlock { - /// Memory reads for non-padding bytes in this block. Length is at most [KECCAK_RATE_BYTES / KECCAK_WORD_SIZE]. + /// Memory reads for non-padding bytes in this block. Length is at most [KECCAK_RATE_BYTES / + /// KECCAK_WORD_SIZE]. pub reads: Vec, /// Index in `reads` of the memory read for < KECCAK_WORD_SIZE bytes, if any. 
pub partial_read_idx: Option, @@ -250,8 +251,8 @@ impl InstructionExecutor for KeccakVmChip { // Add the events to chip state for later trace generation usage self.records.push(record); - // NOTE: Check this is consistent with KeccakVmAir::timestamp_change (we don't use it to avoid - // unnecessary conversions here) + // NOTE: Check this is consistent with KeccakVmAir::timestamp_change (we don't use it to + // avoid unnecessary conversions here) let total_timestamp_delta = len + (KECCAK_REGISTER_READS + KECCAK_ABSORB_READS + KECCAK_DIGEST_WRITES) as u32; memory.increment_timestamp_by(total_timestamp_delta - timestamp_delta); diff --git a/extensions/keccak256/circuit/src/trace.rs b/extensions/keccak256/circuit/src/trace.rs index 2f37db6a0d..c314c38eac 100644 --- a/extensions/keccak256/circuit/src/trace.rs +++ b/extensions/keccak256/circuit/src/trace.rs @@ -205,7 +205,8 @@ where first_row.sponge.is_new_start = Val::::from_bool(block.is_new_start); first_row.sponge.state_hi = diff.pre_hi.map(Val::::from_canonical_u8); first_row.instruction.is_enabled_first_round = first_row.instruction.is_enabled; - // Make memory access aux columns. Any aux column not explicitly defined defaults to all 0s + // Make memory access aux columns. 
Any aux column not explicitly defined defaults to + // all 0s if let Some(register_reads) = diff.register_reads { let need_range_check = [ ®ister_reads[0], // dst diff --git a/extensions/native/circuit/src/castf/core.rs b/extensions/native/circuit/src/castf/core.rs index 0d775f1c94..664767e35e 100644 --- a/extensions/native/circuit/src/castf/core.rs +++ b/extensions/native/circuit/src/castf/core.rs @@ -34,7 +34,8 @@ pub struct CastFCoreCols { #[derive(Copy, Clone, Debug)] pub struct CastFCoreAir { - pub bus: VariableRangeCheckerBus, // to communicate with the range checker that checks that all limbs are < 2^LIMB_BITS + pub bus: VariableRangeCheckerBus, /* to communicate with the range checker that checks that + * all limbs are < 2^LIMB_BITS */ } impl BaseAir for CastFCoreAir { diff --git a/extensions/native/circuit/src/extension.rs b/extensions/native/circuit/src/extension.rs index fbaf11ff74..8f73423e40 100644 --- a/extensions/native/circuit/src/extension.rs +++ b/extensions/native/circuit/src/extension.rs @@ -6,8 +6,8 @@ use loadstore_native_adapter::NativeLoadStoreAdapterChip; use native_vectorized_adapter::NativeVectorizedAdapterChip; use openvm_circuit::{ arch::{ - ExecutionBridge, MemoryConfig, SystemConfig, SystemExecutor, SystemPeriphery, SystemPort, - VmChipComplex, VmConfig, VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, + ExecutionBridge, MemoryConfig, SystemConfig, SystemPort, VmExtension, VmInventory, + VmInventoryBuilder, VmInventoryError, }, system::phantom::PhantomChip, }; diff --git a/extensions/native/circuit/src/field_arithmetic/core.rs b/extensions/native/circuit/src/field_arithmetic/core.rs index 18b2a02612..c813f6a066 100644 --- a/extensions/native/circuit/src/field_arithmetic/core.rs +++ b/extensions/native/circuit/src/field_arithmetic/core.rs @@ -71,7 +71,8 @@ where // - Exactly one flag in `flags` is true. // - The inner product of the `flags` and `opcodes` equals `io.opcode`. 
// - The inner product of the `flags` and `results` equals `io.z`. - // - If `is_div` is true, then `aux.divisor_inv` correctly represents the multiplicative inverse of `io.y`. + // - If `is_div` is true, then `aux.divisor_inv` correctly represents the multiplicative + // inverse of `io.y`. let mut is_valid = AB::Expr::ZERO; let mut expected_opcode = AB::Expr::ZERO; diff --git a/extensions/native/circuit/src/fri/mod.rs b/extensions/native/circuit/src/fri/mod.rs index 054607a34a..7dbc3fd851 100644 --- a/extensions/native/circuit/src/fri/mod.rs +++ b/extensions/native/circuit/src/fri/mod.rs @@ -133,8 +133,8 @@ struct GeneralCols { /// Whether the row is an instruction row. is_ins_row: T, /// For Instruction1 rows, the initial timestamp of the FRI_REDUCED_OPENING instruction. - /// For Workload rows, the final timestamp after processing the next elements minus `INSTRUCTION_READS`. - /// For Instruction2 rows, unused. + /// For Workload rows, the final timestamp after processing the next elements minus + /// `INSTRUCTION_READS`. For Instruction2 rows, unused. timestamp: T, } const GENERAL_WIDTH: usize = GeneralCols::::width(); @@ -159,8 +159,8 @@ struct DataCols { /// b-values. (Note: idx increases within a workload/instruction block, while timestamp, a_ptr, /// and b_ptr decrease.) idx: T, - /// For both Instruction1 and Workload rows, equal to sum_{k=0}^{idx} alpha^{len-i} (b_i - a_i). - /// Instruction1 rows constrain this to be the result written to `mem[result_ptr]`. + /// For both Instruction1 and Workload rows, equal to sum_{k=0}^{idx} alpha^{len-i} (b_i - + /// a_i). Instruction1 rows constrain this to be the result written to `mem[result_ptr]`. result: [T; EXT_DEG], /// The alpha to use in this instruction. Fixed across workload rows; Instruction1 rows read /// this from `mem[alpha_ptr]`. 
@@ -175,8 +175,8 @@ const_assert_eq!(DATA_WIDTH, 12); #[derive(Debug, AlignedBorrow)] struct PrefixCols { general: GeneralCols, - /// WorkloadCols uses this column as the value of `a` read. Instruction1Cols uses this column as - /// the `is_first` flag must be set to one. Shared with Instruction2Cols `is_first`. + /// WorkloadCols uses this column as the value of `a` read. Instruction1Cols uses this column + /// as the `is_first` flag must be set to one. Shared with Instruction2Cols `is_first`. a_or_is_first: T, data: DataCols, } @@ -285,8 +285,9 @@ impl FriReducedOpeningAir { // a_ptr/b_ptr/length/result let ptr_reads = AB::F::from_canonical_usize(INSTRUCTION_READS); let native_as = AB::Expr::from_canonical_u32(AS::Native as u32); - // write_a itself could be anything on non-workload row, but on workload row, it must be boolean. - // write_a on last workflow row will be constrained to equal write_a on instruction1 row, implying the latter is boolean. + // write_a itself could be anything on non-workload row, but on workload row, it must be + // boolean. write_a on last workflow row will be constrained to equal write_a on + // instruction1 row, implying the latter is boolean. builder.when(multiplicity).assert_bool(local_data.write_a); // read a when write_a is 0 self.memory_bridge diff --git a/extensions/native/circuit/src/poseidon2/columns.rs b/extensions/native/circuit/src/poseidon2/columns.rs index dc67a3363f..6c47c23245 100644 --- a/extensions/native/circuit/src/poseidon2/columns.rs +++ b/extensions/native/circuit/src/poseidon2/columns.rs @@ -10,8 +10,8 @@ use crate::{poseidon2::CHUNK, utils::const_max}; /// 2. **Simple Block:** A single row handling permutation/compression operations. /// 3. **Inside-Row Block:** A sequence of rows that compute the row-hash for all input matrix /// columns corresponding to an `MmcsVerifyBatch` input of the same height. -/// 4. 
**Top-Level Block:** A sequence of rows that perform Merkle tree compression on the row hashes -/// produced from an `MmcsVerifyBatch` input. +/// 4. **Top-Level Block:** A sequence of rows that perform Merkle tree compression on the row +/// hashes produced from an `MmcsVerifyBatch` input. #[repr(C)] #[derive(AlignedBorrow)] pub struct NativePoseidon2Cols { @@ -47,9 +47,10 @@ pub struct NativePoseidon2Cols { /// associated with the same instruction. pub opened_element_size_inv: T, - /// On an `incorporate_row` row, this is the first matrix index `i` for which `log_heights[i]` equals `log_height`. - /// On an `incorporate_sibling` row, this holds the initial index corresponding to the `log_height` for the next - /// `incorporate_row` row, or `opened_length` if none exists. + /// On an `incorporate_row` row, this is the first matrix index `i` for which `log_heights[i]` + /// equals `log_height`. On an `incorporate_sibling` row, this holds the initial index + /// corresponding to the `log_height` for the next `incorporate_row` row, or + /// `opened_length` if none exists. pub initial_opened_index: T, /// Pointer to the beginning of the `opened_values` array. @@ -75,9 +76,10 @@ pub struct TopLevelSpecificCols { /// The program counter for the VERIFY_BATCH instruction being processed. pub pc: T, - /// The timestamp marking the end of processing this top-level row. For an `incorporate_sibling` row, - /// it increases by a fixed amount. For an `incorporate_row` row, its increase depends on the row's length - /// and the number of matrices involved, with additional constraints imposed by the internal bus. + /// The timestamp marking the end of processing this top-level row. For an + /// `incorporate_sibling` row, it increases by a fixed amount. For an `incorporate_row` + /// row, its increase depends on the row's length and the number of matrices involved, with + /// additional constraints imposed by the internal bus. 
pub end_timestamp: T, /// Operand `a` from the instruction. Pointer to the `dimensions` array. @@ -86,7 +88,8 @@ pub struct TopLevelSpecificCols { pub opened_register: T, /// Operand `c` from the instruction. Pointer to the length of the `opened_values` array. pub opened_length_register: T, - /// Operand `d` from the instruction. Provided as a hint to the run-time and (otherwise unconstrained). + /// Operand `d` from the instruction. Provided as a hint to the run-time and (otherwise + /// unconstrained). pub proof_id: T, /// Operand `e` from the instruction. Pointer to the pointer of the `index_bits` array, which /// indicates the direction (left/right) of Merkle tree siblings. @@ -94,8 +97,9 @@ pub struct TopLevelSpecificCols { /// Operand `f` from the instruction. Pointer to the pointer of the expected Merkle root. pub commit_register: T, - /// For an `incorporate_row` row, the largest matrix index `i` such that `log_heights[i]` equals `log_height`. - /// For an `incorporate_sibling` row, this is set to `initial_opened_index - 1` for bookkeeping. + /// For an `incorporate_row` row, the largest matrix index `i` such that `log_heights[i]` + /// equals `log_height`. For an `incorporate_sibling` row, this is set to + /// `initial_opened_index - 1` for bookkeeping. pub final_opened_index: T, /// The log height of the matrices currently being incorporated. Remains fixed on @@ -124,7 +128,8 @@ pub struct TopLevelSpecificCols { pub commit_pointer_read: MemoryReadAuxCols, /// Index into the Merkle proof for the next sibling to incorporate. - /// Starts at zero in a top-level block and increments by one after each `incorporate_sibling` row. + /// Starts at zero in a top-level block and increments by one after each `incorporate_sibling` + /// row. pub proof_index: T, /// Memory aux columns for reading either `initial_height` or `sibling_is_on_right`. 
On an @@ -135,7 +140,8 @@ pub struct TopLevelSpecificCols { pub read_final_height: MemoryReadAuxCols, /// Indicator for whether the sibling being incorporated (if any) is on the right. Constrained - /// to equal `index_bits[proof_index]` on `incorporate_sibling` rows. Unconstrained on other rows. + /// to equal `index_bits[proof_index]` on `incorporate_sibling` rows. Unconstrained on other + /// rows. pub sibling_is_on_right: T, /// Pointer to the Merkle root. pub commit_pointer: T, diff --git a/extensions/native/compiler/src/asm/instruction.rs b/extensions/native/compiler/src/asm/instruction.rs index bc5ce3d021..1aa5ea8527 100644 --- a/extensions/native/compiler/src/asm/instruction.rs +++ b/extensions/native/compiler/src/asm/instruction.rs @@ -14,7 +14,8 @@ pub enum AsmInstruction { /// Load extension word (dst, src, var_index, size, offset). /// - /// Load an extension from the address stored at src(fp) into dst(fp) with given index and offset. + /// Load an extension from the address stored at src(fp) into dst(fp) with given index and + /// offset. LoadEI(i32, i32, F, F, F), /// Store word (val, addr, var_index, size, offset) @@ -24,7 +25,8 @@ pub enum AsmInstruction { /// Store extension word (val, addr, var_index, size, offset) /// - /// Store an extension from val(fp) into the address stored at addr(fp) with given index and offset. + /// Store an extension from val(fp) into the address stored at addr(fp) with given index and + /// offset. StoreEI(i32, i32, F, F, F), /// Set dst = imm. @@ -147,7 +149,8 @@ pub enum AsmInstruction { /// HintBits(src, len). /// - /// Bit decompose the field element at pointer `src` to the first `len` little endian bits and add to hint stream. + /// Bit decompose the field element at pointer `src` to the first `len` little endian bits and + /// add to hint stream. 
HintBits(i32, u32), HintLoad(), diff --git a/extensions/native/compiler/src/constraints/halo2/baby_bear.rs b/extensions/native/compiler/src/constraints/halo2/baby_bear.rs index 3c8de4748c..55c1a25deb 100644 --- a/extensions/native/compiler/src/constraints/halo2/baby_bear.rs +++ b/extensions/native/compiler/src/constraints/halo2/baby_bear.rs @@ -87,7 +87,8 @@ impl BabyBearChip { r } - /// Reduce max_bits if possible. This function doesn't guarantee that the actual value is within BabyBear. + /// Reduce max_bits if possible. This function doesn't guarantee that the actual value is within + /// BabyBear. pub fn reduce_max_bits(&self, ctx: &mut Context, a: AssignedBabyBear) -> AssignedBabyBear { if a.max_bits > BABYBEAR_MAX_BITS { self.reduce(ctx, a) @@ -242,7 +243,8 @@ impl BabyBearChip { c } - // This inner product function will be used exclusively for optimizing extension element multiplication. + // This inner product function will be used exclusively for optimizing extension element + // multiplication. 
fn special_inner_product( &self, ctx: &mut Context, diff --git a/extensions/native/compiler/src/constraints/halo2/compiler.rs b/extensions/native/compiler/src/constraints/halo2/compiler.rs index 404dc4cecd..ce108addaa 100644 --- a/extensions/native/compiler/src/constraints/halo2/compiler.rs +++ b/extensions/native/compiler/src/constraints/halo2/compiler.rs @@ -434,8 +434,9 @@ impl Halo2ConstraintCompiler { ext_chip.assert_equal(ctx, exts[&a.0], exts[&b.0]); } DslIr::AssertEqEI(a, b) => { - // Note: we could check if each coordinate of `b` is zero separately for a little more efficiency, - // but omitting to simplify the code + // Note: we could check if each coordinate of `b` is zero separately for a + // little more efficiency, but omitting to simplify + // the code if b.is_zero() { ext_chip.assert_zero(ctx, exts[&a.0]); } else { @@ -541,8 +542,8 @@ pub fn convert_efr>(a: &EF) -> Vec { fn stats_snapshot(ctx: &Context, range_chip: Arc>) -> Halo2Stats { Halo2Stats { total_gate_cell: ctx.advice.len(), - // Note[Xinding]: this is inaccurate because of duplicated constants. But it's too slow if we always - // check for duplicates. + // Note[Xinding]: this is inaccurate because of duplicated constants. But it's too slow if + // we always check for duplicates. 
total_fixed: ctx.copy_manager.lock().unwrap().constant_equalities.len(), total_lookup_cell: range_chip.lookup_manager()[0].total_rows(), } diff --git a/extensions/native/compiler/src/constraints/halo2/poseidon2_perm.rs b/extensions/native/compiler/src/constraints/halo2/poseidon2_perm.rs index 6c7294f41c..f39c9b98a7 100644 --- a/extensions/native/compiler/src/constraints/halo2/poseidon2_perm.rs +++ b/extensions/native/compiler/src/constraints/halo2/poseidon2_perm.rs @@ -114,7 +114,8 @@ impl Poseidon2State { // Matrix is circ(2, 1, 1) let sum = gate.sum(ctx, self.s.iter().copied()); for (i, x) in self.s.iter_mut().enumerate() { - // This is the same as `*x = gate.add(ctx, *x, sum)` but we save a cell by reusing `sum`: + // This is the same as `*x = gate.add(ctx, *x, sum)` but we save a cell by reusing + // `sum`: if i % 2 == 0 { ctx.assign_region( [ @@ -160,7 +161,8 @@ impl Poseidon2State { assert_eq!(T, 3); let sum = gate.sum(ctx, self.s.iter().copied()); for i in 0..T { - // This is the same as `self.s[i] = gate.mul_add(ctx, self.s[i], Constant(mat_internal_diag_m_1[i]), sum)` but we save a cell by reusing `sum`. + // This is the same as `self.s[i] = gate.mul_add(ctx, self.s[i], + // Constant(mat_internal_diag_m_1[i]), sum)` but we save a cell by reusing `sum`. if i % 2 == 0 { ctx.assign_region( [ diff --git a/extensions/native/compiler/src/conversion/mod.rs b/extensions/native/compiler/src/conversion/mod.rs index 0a202b69fa..edf10467cf 100644 --- a/extensions/native/compiler/src/conversion/mod.rs +++ b/extensions/native/compiler/src/conversion/mod.rs @@ -103,7 +103,8 @@ fn i32_f(x: i32) -> F { } } -/// Warning: for extension field branch instructions, the `pc, labels` **must** be using `DEFAULT_PC_STEP`. +/// Warning: for extension field branch instructions, the `pc, labels` **must** be using +/// `DEFAULT_PC_STEP`. 
fn convert_instruction>( instruction: AsmInstruction, debug_info: Option, diff --git a/extensions/native/compiler/src/ir/bits.rs b/extensions/native/compiler/src/ir/bits.rs index 7cb5167be5..2fd7d5a97b 100644 --- a/extensions/native/compiler/src/ir/bits.rs +++ b/extensions/native/compiler/src/ir/bits.rs @@ -6,8 +6,8 @@ use openvm_stark_sdk::p3_baby_bear::BabyBear; use super::{Array, Builder, Config, DslIr, Felt, MemIndex, Var}; impl Builder { - /// Converts a felt to bits. Will result in a failed assertion if `num` has more than `num_bits` bits. - /// Only works for C::F = BabyBear + /// Converts a felt to bits. Will result in a failed assertion if `num` has more than `num_bits` + /// bits. Only works for C::F = BabyBear pub fn num2bits_f(&mut self, num: Felt, num_bits: u32) -> Array> { assert_eq!(TypeId::of::(), TypeId::of::()); @@ -46,12 +46,12 @@ impl Builder { // * 2^30 + ... + 2^x + y for y in [0, 2^(x - 1)) and 27 < x <= 30 // * 2^30 + ... + 2^27 // * y for y in [0, 2^27) - // To check that bits `b[0], ..., b[30]` represent `num = b[0] + ... + b[30] * 2^30` without overflow, - // we may check that: - // * if `num_bits < 27`, then `b[30] = 0`, so overflow is impossible. - // In this case, `suffix_bit_sum = 0`, so the check below passes. - // * if `num_bits >= 27`, then we must check: - // if `suffix_bit_sum = b[27] + ... + b[30] = 4`, then `prefix_sum = b[0] + ... + b[26] * 2^26 = 0` + // To check that bits `b[0], ..., b[30]` represent `num = b[0] + ... + b[30] * 2^30` without + // overflow, we may check that: + // * if `num_bits < 27`, then `b[30] = 0`, so overflow is impossible. In this case, + // `suffix_bit_sum = 0`, so the check below passes. + // * if `num_bits >= 27`, then we must check: if `suffix_bit_sum = b[27] + ... + b[30] = + // 4`, then `prefix_sum = b[0] + ... 
+ b[26] * 2^26 = 0` let suffix_bit_sum_var = self.cast_felt_to_var(suffix_bit_sum); self.if_eq(suffix_bit_sum_var, C::N::from_canonical_u32(4)) .then(|builder| { diff --git a/extensions/native/compiler/src/ir/builder.rs b/extensions/native/compiler/src/ir/builder.rs index 82b43ab53d..966c0db21c 100644 --- a/extensions/native/compiler/src/ir/builder.rs +++ b/extensions/native/compiler/src/ir/builder.rs @@ -645,7 +645,8 @@ impl Builder { self.assign(&nb_public_values, nb_public_values + C::N::ONE); } - /// Commits a Var as public value. This value will be constrained when verified. This method should only be used in static mode. + /// Commits a Var as public value. This value will be constrained when verified. This method + /// should only be used in static mode. pub fn static_commit_public_value(&mut self, index: usize, val: Var) { assert!( self.flags.static_only, diff --git a/extensions/native/compiler/src/ir/instructions.rs b/extensions/native/compiler/src/ir/instructions.rs index 24ea9d0830..9159c5775b 100644 --- a/extensions/native/compiler/src/ir/instructions.rs +++ b/extensions/native/compiler/src/ir/instructions.rs @@ -51,9 +51,11 @@ pub enum DslIr { SubFIN(Felt, C::F, Felt), /// Subtracts two extension field elements (ext = ext - ext). SubE(Ext, Ext, Ext), - /// Subtracts an extension field element and an extension field immediate (ext = ext - ext field imm). + /// Subtracts an extension field element and an extension field immediate (ext = ext - ext + /// field imm). SubEI(Ext, Ext, C::EF), - /// Subtracts an extension field immediate and an extension field element (ext = ext field imm - ext). + /// Subtracts an extension field immediate and an extension field element (ext = ext field imm + /// - ext). SubEIN(Ext, C::EF, Ext), /// Subtracts an extension field element and a field immediate (ext = ext - field imm). SubEFI(Ext, Ext, C::F), @@ -71,7 +73,8 @@ pub enum DslIr { MulFI(Felt, Felt, C::F), /// Multiplies two extension field elements (ext = ext * ext). 
MulE(Ext, Ext, Ext), - /// Multiplies an extension field element and an extension field immediate (ext = ext * ext field imm). + /// Multiplies an extension field element and an extension field immediate (ext = ext * ext + /// field imm). MulEI(Ext, Ext, C::EF), /// Multiplies an extension field element and a field immediate (ext = ext * field imm). MulEFI(Ext, Ext, C::F), @@ -87,9 +90,11 @@ pub enum DslIr { DivFIN(Felt, C::F, Felt), /// Divides two extension field elements (ext = ext / ext). DivE(Ext, Ext, Ext), - /// Divides an extension field element and an extension field immediate (ext = ext / ext field imm). + /// Divides an extension field element and an extension field immediate (ext = ext / ext field + /// imm). DivEI(Ext, Ext, C::EF), - /// Divides and extension field immediate and an extension field element (ext = ext field imm / ext). + /// Divides and extension field immediate and an extension field element (ext = ext field imm / + /// ext). DivEIN(Ext, C::EF, Ext), /// Divides an extension field element and a field immediate (ext = ext / field imm). DivEFI(Ext, Ext, C::F), @@ -122,23 +127,27 @@ pub enum DslIr { TracedVec>, ), - /// Executes an equal conditional branch with the parameters (lhs var, rhs var, then body, else body). + /// Executes an equal conditional branch with the parameters (lhs var, rhs var, then body, else + /// body). IfEq( Var, Var, TracedVec>, TracedVec>, ), - /// Executes a not equal conditional branch with the parameters (lhs var, rhs var, then body, else body). + /// Executes a not equal conditional branch with the parameters (lhs var, rhs var, then body, + /// else body). IfNe( Var, Var, TracedVec>, TracedVec>, ), - /// Executes an equal conditional branch with the parameters (lhs var, rhs imm, then body, else body). + /// Executes an equal conditional branch with the parameters (lhs var, rhs imm, then body, else + /// body). 
IfEqI(Var, C::N, TracedVec>, TracedVec>), - /// Executes a not equal conditional branch with the parameters (lhs var, rhs imm, then body, else body). + /// Executes a not equal conditional branch with the parameters (lhs var, rhs imm, then body, + /// else body). IfNeI(Var, C::N, TracedVec>, TracedVec>), // Assertions. @@ -152,7 +161,8 @@ pub enum DslIr { AssertEqVI(Var, C::N), /// Assert that a field element is equal to a field immediate (felt == field imm). AssertEqFI(Felt, C::F), - /// Assert that an extension field element is equal to an extension field immediate (ext == ext field imm). + /// Assert that an extension field element is equal to an extension field immediate (ext == ext + /// field imm). AssertEqEI(Ext, C::EF), /// Assert that a usize is not zero (usize != 0). @@ -179,7 +189,8 @@ pub enum DslIr { StoreHeapPtr(Ptr), // Bits. - /// Decompose a field element into bits (bits = num2bits(felt)). Should only be used when target is a circuit. + /// Decompose a field element into bits (bits = num2bits(felt)). Should only be used when + /// target is a circuit. CircuitNum2BitsF(Felt, Vec>), /// Decompose a Var into 16-bit limbs. CircuitVarTo64BitsF(Var, [Felt; 4]), @@ -187,14 +198,15 @@ pub enum DslIr { // Hashing. /// Permutes an array of baby bear elements using Poseidon2 (output = p2_permute(array)). Poseidon2PermuteBabyBear(Array>, Array>), - /// Compresses two baby bear element arrays using Poseidon2 (output = p2_compress(array1, array2)). + /// Compresses two baby bear element arrays using Poseidon2 (output = p2_compress(array1, + /// array2)). Poseidon2CompressBabyBear( Array>, Array>, Array>, ), - /// Permutes an array of Bn254 elements using Poseidon2 (output = p2_permute(array)). Should only - /// be used when target is a circuit. + /// Permutes an array of Bn254 elements using Poseidon2 (output = p2_permute(array)). Should + /// only be used when target is a circuit. CircuitPoseidon2Permute([Var; 3]), // Miscellaneous instructions. 
@@ -230,18 +242,19 @@ pub enum DslIr { Halt, // Public inputs for circuits. - /// Publish a field element as the ith public value. Should only be used when target is a circuit. + /// Publish a field element as the ith public value. Should only be used when target is a + /// circuit. CircuitPublish(Var, usize), // FRI specific instructions. /// Select's a variable based on a condition. (select(cond, true_val, false_val) => output). /// Should only be used when target is a circuit. CircuitSelectV(Var, Var, Var, Var), - /// Select's a field element based on a condition. (select(cond, true_val, false_val) => output). - /// Should only be used when target is a circuit. + /// Select's a field element based on a condition. (select(cond, true_val, false_val) => + /// output). Should only be used when target is a circuit. CircuitSelectF(Var, Felt, Felt, Felt), - /// Select's an extension field element based on a condition. (select(cond, true_val, false_val) => output). - /// Should only be used when target is a circuit. + /// Select's an extension field element based on a condition. (select(cond, true_val, + /// false_val) => output). Should only be used when target is a circuit. CircuitSelectE( Var, Ext, @@ -290,8 +303,9 @@ pub enum DslIr { /// Assert that v < 2^bit. RangeCheckV(Var, usize), - /// Start the cycle tracker used by a block of code annotated by the string input. Calling this with the same - /// string will end the open cycle tracker instance and start a new one with an increasing numeric postfix. + /// Start the cycle tracker used by a block of code annotated by the string input. Calling this + /// with the same string will end the open cycle tracker instance and start a new one with + /// an increasing numeric postfix. CycleTrackerStart(String), /// End the cycle tracker used by a block of code annotated by the string input. 
CycleTrackerEnd(String), diff --git a/extensions/native/compiler/src/ir/symbolic.rs b/extensions/native/compiler/src/ir/symbolic.rs index 838577fa18..d5dfa9abdd 100644 --- a/extensions/native/compiler/src/ir/symbolic.rs +++ b/extensions/native/compiler/src/ir/symbolic.rs @@ -792,7 +792,8 @@ impl> Neg for SymbolicExt { } } -// Implement all operations between N, F, EF, and SymbolicVar, SymbolicFelt, SymbolicExt +// Implement all operations between N, F, EF, and SymbolicVar, SymbolicFelt, SymbolicExt impl Add for SymbolicFelt { type Output = Self; diff --git a/extensions/native/recursion/Cargo.toml b/extensions/native/recursion/Cargo.toml index 851a9f0ad7..c799671a55 100644 --- a/extensions/native/recursion/Cargo.toml +++ b/extensions/native/recursion/Cargo.toml @@ -18,10 +18,7 @@ p3-dft = { workspace = true } p3-fri = { workspace = true } p3-symmetric = { workspace = true } p3-merkle-tree = { workspace = true } -snark-verifier-sdk = { workspace = true, features = [ - "loader_evm", - "revm", -], optional = true } +snark-verifier-sdk = { workspace = true, optional = true } itertools.workspace = true rand.workspace = true serde.workspace = true @@ -39,20 +36,25 @@ tempfile = "3.14.0" bitcode = { workspace = true } [features] -default = ["parallel", "mimalloc"] -parallel = ["openvm-stark-backend/parallel"] +default = ["parallel", "jemalloc"] static-verifier = [ "openvm-native-compiler/halo2-compiler", - "dep:snark-verifier-sdk", + "snark-verifier-sdk", "dep:once_cell", "dep:serde_with", ] +evm-prove = ["static-verifier", "snark-verifier-sdk/loader_evm"] +evm-verify = [ + "evm-prove", + "snark-verifier-sdk/revm", +] # evm-verify needs REVM to simulate EVM contract verification test-utils = ["openvm-circuit/test-utils"] bench-metrics = [ "dep:metrics", "openvm-circuit/bench-metrics", "openvm-native-compiler/bench-metrics", ] +parallel = ["openvm-stark-backend/parallel"] mimalloc = ["openvm-stark-backend/mimalloc"] jemalloc = ["openvm-stark-backend/jemalloc"] 
nightly-features = ["openvm-circuit/nightly-features"] diff --git a/extensions/native/recursion/src/fri/domain.rs b/extensions/native/recursion/src/fri/domain.rs index 5c7cd3fa02..cdc8fc242c 100644 --- a/extensions/native/recursion/src/fri/domain.rs +++ b/extensions/native/recursion/src/fri/domain.rs @@ -108,8 +108,8 @@ where shift: builder.eval(self.shift * domain_power), g, }; - // ATTENTION: here must use `builder.set_value`. `builder.set` will convert `Usize::Const` - // to `Usize::Var` because it calls `builder.eval`. + // ATTENTION: here must use `builder.set_value`. `builder.set` will convert + // `Usize::Const` to `Usize::Var` because it calls `builder.eval`. builder.set_value(&domains, i_vec[0], domain); builder.assign(&domain_power, domain_power * g_dom); }); diff --git a/extensions/native/recursion/src/fri/mod.rs b/extensions/native/recursion/src/fri/mod.rs index e60e64b589..c8b3454a19 100644 --- a/extensions/native/recursion/src/fri/mod.rs +++ b/extensions/native/recursion/src/fri/mod.rs @@ -70,7 +70,8 @@ where let step = builder.get(&proof.commit_phase_openings, i); let beta = builder.get(betas, i); - // reduced_openings.len() == MAX_TWO_ADICITY >= log_max_lde_height >= log_folded_height_plus_one + // reduced_openings.len() == MAX_TWO_ADICITY >= log_max_lde_height >= + // log_folded_height_plus_one let reduced_opening = builder.get(reduced_openings, log_folded_height_plus_one); builder.assign(&folded_eval, folded_eval + reduced_opening); @@ -285,7 +286,8 @@ where let nested_opened_values_buffer = if builder.flags.static_only { builder.array(REDUCER_BUFFER_SIZE) } else { - // This points to the same memory. Only the length of this object will change when truncating. + // This points to the same memory. Only the length of this object will change when + // truncating. 
let ret = builder.uninit(); builder.assign(&ret, nested_opened_values_buffer.clone()); ret diff --git a/extensions/native/recursion/src/fri/two_adic_pcs.rs b/extensions/native/recursion/src/fri/two_adic_pcs.rs index ed61bb4c9d..a6527482f1 100644 --- a/extensions/native/recursion/src/fri/two_adic_pcs.rs +++ b/extensions/native/recursion/src/fri/two_adic_pcs.rs @@ -23,15 +23,16 @@ pub const MAX_TWO_ADICITY: usize = 27; /// Notes: /// 1. FieldMerkleTreeMMCS sorts traces by height in descending order when committing data. -/// 2. **Required** that `C::F` has two-adicity <= [MAX_TWO_ADICITY]. In particular this implies that all LDE matrices have -/// `log2(lde_height) <= MAX_TWO_ADICITY`. +/// 2. **Required** that `C::F` has two-adicity <= [MAX_TWO_ADICITY]. In particular this implies +/// that all LDE matrices have `log2(lde_height) <= MAX_TWO_ADICITY`. /// 3. **Required** that the maximum trace height is `2^log_max_height - 1`. /// /// Reference: /// /// So traces are sorted in `opening_proof`. /// -/// 2. FieldMerkleTreeMMCS::poseidon2 keeps the raw values in the original order. So traces are not sorted in `opened_values`. +/// 2. FieldMerkleTreeMMCS::poseidon2 keeps the raw values in the original order. So traces are not +/// sorted in `opened_values`. /// /// Reference: /// @@ -149,7 +150,8 @@ pub fn verify_two_adic_pcs( }; builder.cycle_tracker_end("pre-compute-rounds-context"); - // Accumulators of the reduced opening sums, reset per query. The array `ro` is indexed by log_height. + // Accumulators of the reduced opening sums, reset per query. The array `ro` is indexed by + // log_height. let ro: Array> = builder.array(MAX_TWO_ADICITY + 1); let alpha_pow: Array> = builder.array(MAX_TWO_ADICITY + 1); @@ -158,21 +160,23 @@ pub fn verify_two_adic_pcs( let index_bits = challenger.sample_bits(builder, log_max_lde_height); // We reset the reduced opening accumulators at the start of each query. 
- // We describe what `ro[log_height]` computes per query in pseduo-code, where `log_height` is log2 of the size of the LDE domain: - // ro[log_height] = 0 + // We describe what `ro[log_height]` computes per query in pseduo-code, where `log_height` + // is log2 of the size of the LDE domain: ro[log_height] = 0 // alpha_pow[log_height] = 1 // for round in rounds: - // for mat in round.mats where (mat.domain.log_n + log_blowup == log_height): // preserving order of round.mats - // // g is generator of F + // for mat in round.mats where (mat.domain.log_n + log_blowup == log_height): // + // preserving order of round.mats // g is generator of F // // w_{log_height} is generator of subgroup of F of order 2^log_height - // x = g * w_{log_height}^{reverse_bits(index >> (log_max_height - log_height), log_height)} - // // reverse_bits(x, bits) takes an unsigned integer x with `bits` bits and returns the unsigned integer with the bits of x reversed. - // // x is a rotated evaluation point in a coset of the LDE domain. - // ps_at_x = [claimed evaluation of p at x for each polynomial p corresponding to column of mat] + // x = g * w_{log_height}^{reverse_bits(index >> (log_max_height - log_height), + // log_height)} // reverse_bits(x, bits) takes an unsigned integer x with + // `bits` bits and returns the unsigned integer with the bits of x reversed. // + // x is a rotated evaluation point in a coset of the LDE domain. ps_at_x = + // [claimed evaluation of p at x for each polynomial p corresponding to column of mat] // // ps_at_x is array of Felt // for (z, ps_at_z) in zip(mat.points, mat.values): - // // z is an out of domain point in Ext. There may be multiple per round to account for rotations in AIR constraints. - // // ps_at_z is array of Ext for [claimed evaluation of p at z for each polyomial p corresponding to column of mat] + // // z is an out of domain point in Ext. There may be multiple per round to account + // for rotations in AIR constraints. 
// ps_at_z is array of Ext for [claimed + // evaluation of p at z for each polyomial p corresponding to column of mat] // for (p_at_x, p_at_z) in zip(ps_at_x, ps_at_z): // ro[log_height] += alpha_pow[log_height] * (p_at_x - p_at_z) / (x - z) // alpha_pow[log_height] *= alpha @@ -204,11 +208,13 @@ pub fn verify_two_adic_pcs( // b = index_bits // w = generator of order 2^log_max_height - // we first compute `w ** (b[0] * 2^(log_max_height - 1) + ... + b[log_max_height - 1])` using a square-and-multiply algorithm. + // we first compute `w ** (b[0] * 2^(log_max_height - 1) + ... + b[log_max_height - 1])` + // using a square-and-multiply algorithm. let res = builder.exp_bits_big_endian(w, &index_bits_truncated); // we now compute: - // tag_exp[log_max_height - i] = g * w ** (b[log_max_height - i] * 2^(log_max_height - 1) + ... + b[log_max_height - 1] * 2^(log_max_height - i)) + // tag_exp[log_max_height - i] = g * w ** (b[log_max_height - i] * 2^(log_max_height - + // 1) + ... + b[log_max_height - 1] * 2^(log_max_height - i)) // using a square-and-divide algorithm. // g * res is tag_exp[0] // `tag_exp` is used below as a rotated evaluation point in a coset of the LDE domain. @@ -306,7 +312,8 @@ pub fn verify_two_adic_pcs( builder.assign(&cur_ro, cur_ro + (mat_ro * cur_alpha_pow / (z - x))); builder.assign(&cur_alpha_pow, cur_alpha_pow * mat_alpha_pow); } - // The buffer `mat_opening` has now been written to, so we set `is_init` to 1. + // The buffer `mat_opening` has now been written to, so we set `is_init` to + // 1. builder.assign(&is_init, C::N::ONE); builder.cycle_tracker_end("single-reduced-opening-eval"); }); @@ -537,8 +544,9 @@ fn compute_rounds_context( iter_zip!(builder, round.mats, ov_ptrs, mat_alpha_pows).for_each(|ptr_vec, builder| { let mat = builder.iter_ptr_get(&round.mats, ptr_vec[0]); let local = builder.get(&mat.values, 0); - // We allocate the underlying buffer for the current `ov_ptr` here. 
On allocation, it is uninit, and - // will be written to on the first call of `fri_single_reduced_opening_eval` for this `ov_ptr`. + // We allocate the underlying buffer for the current `ov_ptr` here. On allocation, it is + // uninit, and will be written to on the first call of + // `fri_single_reduced_opening_eval` for this `ov_ptr`. let buf = builder.array(local.len()); let width = buf.len(); builder.iter_ptr_set(&ov_ptrs, ptr_vec[1], buf); diff --git a/extensions/native/recursion/src/halo2/mod.rs b/extensions/native/recursion/src/halo2/mod.rs index cca9000214..b53a298eb4 100644 --- a/extensions/native/recursion/src/halo2/mod.rs +++ b/extensions/native/recursion/src/halo2/mod.rs @@ -17,7 +17,6 @@ use openvm_stark_backend::p3_field::extension::BinomialExtensionField; use openvm_stark_sdk::{p3_baby_bear::BabyBear, p3_bn254_fr::Bn254Fr}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use snark_verifier_sdk::{ - evm::encode_calldata, halo2::{gen_dummy_snark_from_vk, gen_snark_shplonk}, snark_verifier::halo2_base::{ gates::{ @@ -57,7 +56,8 @@ pub struct DslOperations { } /// Necessary metadata to prove a Halo2 circuit -/// Attention: Deserializer of this struct is not generic. It only works for verifier/wrapper circuit. +/// Attention: Deserializer of this struct is not generic. It only works for verifier/wrapper +/// circuit. #[derive(Debug, Clone)] pub struct Halo2ProvingPinning { pub pk: ProvingKey, @@ -74,8 +74,9 @@ pub struct Halo2ProvingMetadata { impl RawEvmProof { /// Return bytes calldata to be passed to the verifier contract. 
+ #[cfg(feature = "evm-prove")] pub fn verifier_calldata(&self) -> Vec { - encode_calldata(&[self.instances.clone()], &self.proof) + snark_verifier_sdk::evm::encode_calldata(&[self.instances.clone()], &self.proof) } } diff --git a/extensions/native/recursion/src/halo2/wrapper.rs b/extensions/native/recursion/src/halo2/wrapper.rs index e890430c7c..958c502a86 100644 --- a/extensions/native/recursion/src/halo2/wrapper.rs +++ b/extensions/native/recursion/src/halo2/wrapper.rs @@ -2,33 +2,31 @@ use itertools::Itertools; use openvm_stark_backend::p3_util::log2_ceil_usize; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +#[cfg(feature = "evm-prove")] +use snark_verifier_sdk::snark_verifier::{ + halo2_base::halo2_proofs::{halo2curves::bn256::G1Affine, plonk::VerifyingKey}, + loader::evm::compile_solidity, +}; use snark_verifier_sdk::{ - evm::{evm_verify, gen_evm_proof_shplonk, gen_evm_verifier_sol_code}, halo2::aggregation::{AggregationCircuit, AggregationConfigParams, VerifierUniversality}, - snark_verifier::{ - halo2_base::{ - gates::circuit::{ - CircuitBuilderStage, - CircuitBuilderStage::{Keygen, Prover}, - }, - halo2_proofs::{ - halo2curves::bn256::G1Affine, - plonk::{keygen_pk2, VerifyingKey}, - poly::commitment::Params, - }, - }, - loader::evm::compile_solidity, + snark_verifier::halo2_base::{ + gates::circuit::CircuitBuilderStage, + halo2_proofs::{plonk::keygen_pk2, poly::commitment::Params}, }, CircuitExt, Snark, SHPLONK, }; +#[cfg(feature = "evm-prove")] +use crate::halo2::RawEvmProof; use crate::halo2::{ utils::{Halo2ParamsReader, KZG_PARAMS_FOR_SVK}, - Halo2Params, Halo2ProvingMetadata, Halo2ProvingPinning, RawEvmProof, + Halo2Params, Halo2ProvingMetadata, Halo2ProvingPinning, }; +/// `FallbackEvmVerifier` is for the raw verifier contract outputted by +/// `snark-verifier`. 
#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EvmVerifier { +pub struct FallbackEvmVerifier { pub sol_code: String, pub artifact: EvmVerifierByteCode, } @@ -61,7 +59,8 @@ impl Halo2WrapperProvingKey { let k = params.k(); #[cfg(feature = "bench-metrics")] let start = std::time::Instant::now(); - let mut circuit = generate_wrapper_circuit_object(Keygen, k as usize, dummy_snark); + let mut circuit = + generate_wrapper_circuit_object(CircuitBuilderStage::Keygen, k as usize, dummy_snark); circuit.calculate_params(Some(MIN_ROWS)); let config_params = circuit.builder.config_params.clone(); tracing::info!( @@ -85,16 +84,21 @@ impl Halo2WrapperProvingKey { }, } } + #[cfg(feature = "evm-verify")] /// A helper function for testing to verify the proof of this circuit with evm verifier. - pub fn evm_verify(evm_verifier: &EvmVerifier, evm_proof: &RawEvmProof) -> Result { - evm_verify( + pub fn evm_verify( + evm_verifier: &FallbackEvmVerifier, + evm_proof: &RawEvmProof, + ) -> Result { + snark_verifier_sdk::evm::evm_verify( evm_verifier.artifact.bytecode.clone(), vec![evm_proof.instances.clone()], evm_proof.proof.clone(), ) } + #[cfg(feature = "evm-prove")] /// Return deployment code for EVM verifier which can verify the snark of this circuit. 
- pub fn generate_evm_verifier(&self, params: &Halo2Params) -> EvmVerifier { + pub fn generate_fallback_evm_verifier(&self, params: &Halo2Params) -> FallbackEvmVerifier { assert_eq!( self.pinning.metadata.config_params.k as u32, params.k(), @@ -106,6 +110,7 @@ impl Halo2WrapperProvingKey { self.pinning.metadata.num_pvs.clone(), ) } + #[cfg(feature = "evm-prove")] pub fn prove_for_evm(&self, params: &Halo2Params, snark_to_verify: Snark) -> RawEvmProof { #[cfg(feature = "bench-metrics")] let start = std::time::Instant::now(); @@ -113,7 +118,12 @@ impl Halo2WrapperProvingKey { let prover_circuit = self.generate_circuit_object_for_proving(k, snark_to_verify); let mut pvs = prover_circuit.instances(); assert_eq!(pvs.len(), 1); - let proof = gen_evm_proof_shplonk(params, &self.pinning.pk, prover_circuit, pvs.clone()); + let proof = snark_verifier_sdk::evm::gen_evm_proof_shplonk( + params, + &self.pinning.pk, + prover_circuit, + pvs.clone(), + ); #[cfg(feature = "bench-metrics")] metrics::gauge!("total_proof_time_ms").set(start.elapsed().as_millis() as f64); @@ -122,6 +132,8 @@ impl Halo2WrapperProvingKey { proof, } } + + #[cfg(feature = "evm-prove")] fn generate_circuit_object_for_proving( &self, k: usize, @@ -136,7 +148,7 @@ impl Halo2WrapperProvingKey { self.pinning.metadata.num_pvs[0], snark_to_verify.instances[0].len() + 12, ); - generate_wrapper_circuit_object(Prover, k, snark_to_verify) + generate_wrapper_circuit_object(CircuitBuilderStage::Prover, k, snark_to_verify) .use_params( self.pinning .metadata @@ -152,7 +164,11 @@ impl Halo2WrapperProvingKey { let mut k = 20; let mut first_run = true; loop { - let mut circuit = generate_wrapper_circuit_object(Keygen, k, dummy_snark.clone()); + let mut circuit = generate_wrapper_circuit_object( + CircuitBuilderStage::Keygen, + k, + dummy_snark.clone(), + ); circuit.calculate_params(Some(MIN_ROWS)); assert_eq!( circuit.builder.config_params.num_advice_per_phase.len(), @@ -205,15 +221,19 @@ fn 
emit_wrapper_circuit_metrics(agg_circuit: &AggregationCircuit) { metrics::gauge!("halo2_total_cells").set(total_cell as f64); } +#[cfg(feature = "evm-prove")] fn gen_evm_verifier( params: &Halo2Params, vk: &VerifyingKey, num_instance: Vec, -) -> EvmVerifier { - let sol_code = - gen_evm_verifier_sol_code::(params, vk, num_instance); +) -> FallbackEvmVerifier { + let sol_code = snark_verifier_sdk::evm::gen_evm_verifier_sol_code::( + params, + vk, + num_instance, + ); let byte_code = compile_solidity(&sol_code); - EvmVerifier { + FallbackEvmVerifier { sol_code, artifact: EvmVerifierByteCode { sol_compiler_version: "0.8.19".to_string(), diff --git a/extensions/native/recursion/src/stark/mod.rs b/extensions/native/recursion/src/stark/mod.rs index 9ee94d55d8..f3f4a8106f 100644 --- a/extensions/native/recursion/src/stark/mod.rs +++ b/extensions/native/recursion/src/stark/mod.rs @@ -48,7 +48,8 @@ pub struct VerifierProgram { } impl VerifierProgram { - /// Create a new instance of the program for the [`openvm_stark_sdk::config::baby_bear_poseidon2`] + /// Create a new instance of the program for the + /// [`openvm_stark_sdk::config::baby_bear_poseidon2`] pub fn build( constants: MultiStarkVerificationAdvice, fri_params: &FriParameters, @@ -60,7 +61,8 @@ impl VerifierProgram { Self::build_with_options(constants, fri_params, options) } - /// Create a new instance of the program for the [`openvm_stark_sdk::config::baby_bear_poseidon2`] + /// Create a new instance of the program for the + /// [`openvm_stark_sdk::config::baby_bear_poseidon2`] pub fn build_with_options( constants: MultiStarkVerificationAdvice, fri_params: &FriParameters, @@ -166,7 +168,8 @@ where // (T01b): `num_challenges_to_sample.len() < 2`. let num_phases = RVar::from(num_challenges_to_sample.len()); - // Here the shape of `exposed_values_after_challenge` is not verified. But it's verified later (T01c). + // Here the shape of `exposed_values_after_challenge` is not verified. 
But it's verified + // later (T01c). assert_cumulative_sums(builder, air_proofs, &num_challenges_to_sample); let air_perm_by_height = if builder.flags.static_only { @@ -184,8 +187,8 @@ where builder.assert_less_than_slow_small_rhs(perm_i.clone(), num_airs); builder.set_value(&mask, perm_i.clone(), one.clone()); }); - // Check that each index of mask was set, i.e., that `air_perm_by_height` is a permutation. - // Also check that permutation is decreasing by height. + // Check that each index of mask was set, i.e., that `air_perm_by_height` is a + // permutation. Also check that permutation is decreasing by height. let prev_log_height_plus_one: Usize<_> = builder.eval(RVar::from(MAX_TWO_ADICITY - pcs.config.log_blowup + 1)); iter_zip!(builder, air_perm_by_height).for_each(|ptr_vec, builder| { @@ -207,7 +210,8 @@ where }; // (T02a): `air_perm_by_height` is a valid permutation of `0..num_airs`. // (T02b): For all `i`, `air_proofs[i].log_degree <= MAX_TWO_ADICITY - log_blowup`. - // (T02c): For all `0<=i= air_proofs[air_perm_by_height[i+1]].log_degree`. + // (T02c): For all `0<=i= + // air_proofs[air_perm_by_height[i+1]].log_degree`. 
let log_max_height = { let index = builder.get(air_perm_by_height, RVar::zero()); let air_proof = builder.get(air_proofs, index); @@ -449,7 +453,8 @@ where builder.assert_usize_eq(prep_width, prep.local.len()); builder.assert_usize_eq(prep_width, prep.next.len()); - // Assumption: each AIR with preprocessed trace has its own commitment and opening values + // Assumption: each AIR with preprocessed trace has its own commitment and + // opening values let values = builder.array::>(2); builder.set_value(&values, 0, prep.local); builder.set_value(&values, 1, prep.next); diff --git a/extensions/native/recursion/src/types.rs b/extensions/native/recursion/src/types.rs index 2a34b31d7a..36de6416e9 100644 --- a/extensions/native/recursion/src/types.rs +++ b/extensions/native/recursion/src/types.rs @@ -22,8 +22,9 @@ pub struct StarkVerificationAdvice { pub preprocessed_data: Option>, /// Trace sub-matrix widths pub width: TraceWidth, - /// The factor to multiply the trace degree by to get the degree of the quotient polynomial. Determined from the max constraint degree of the AIR constraints. - /// This is equivalently the number of chunks the quotient polynomial is split into. + /// The factor to multiply the trace degree by to get the degree of the quotient polynomial. + /// Determined from the max constraint degree of the AIR constraints. This is equivalently + /// the number of chunks the quotient polynomial is split into. pub quotient_degree: usize, /// Number of public values for this STARK only pub num_public_values: usize, diff --git a/extensions/native/recursion/src/utils.rs b/extensions/native/recursion/src/utils.rs index 0cc9c4697e..3c018cc0c2 100644 --- a/extensions/native/recursion/src/utils.rs +++ b/extensions/native/recursion/src/utils.rs @@ -57,8 +57,8 @@ pub fn split_32(builder: &mut Builder, val: Var, n: usize) - felts[0..n].to_vec() } -/// Eval two expressions, return in the reversed order if cond == 1. Otherwise, return in the original order. 
-/// This is a helper function for optimal performance. +/// Eval two expressions, return in the reversed order if cond == 1. Otherwise, return in the +/// original order. This is a helper function for optimal performance. pub fn cond_eval + CanSelect>( builder: &mut Builder, cond: Var, diff --git a/extensions/native/recursion/src/vars.rs b/extensions/native/recursion/src/vars.rs index 287d915f17..b295fd6714 100644 --- a/extensions/native/recursion/src/vars.rs +++ b/extensions/native/recursion/src/vars.rs @@ -74,8 +74,9 @@ pub struct StarkVerificationAdviceVariable { pub preprocessed_data: Array>, /// Trace sub-matrix widths pub width: TraceWidthVariable, - /// The factor to multiply the trace degree by to get the degree of the quotient polynomial. Determined from the max constraint degree of the AIR constraints. - /// This is equivalently the number of chunks the quotient polynomial is split into. + /// The factor to multiply the trace degree by to get the degree of the quotient polynomial. + /// Determined from the max constraint degree of the AIR constraints. This is equivalently + /// the number of chunks the quotient polynomial is split into. pub log_quotient_degree: Usize, /// Number of public values for this STARK only pub num_public_values: Usize, diff --git a/extensions/native/recursion/src/view.rs b/extensions/native/recursion/src/view.rs index 7b48187617..4df191268c 100644 --- a/extensions/native/recursion/src/view.rs +++ b/extensions/native/recursion/src/view.rs @@ -87,7 +87,8 @@ pub fn get_advice_per_air( } else { OptionalVar { is_some: Usize::from(1), - // Because `C::F::ORDER_U32` is prime and `max_coefficient > 1`, `floor(C::F::ORDER_U32 / max_coefficient) * max_coefficient < C::F::ORDER_U32`, + // Because `C::F::ORDER_U32` is prime and `max_coefficient > 1`, + // `floor(C::F::ORDER_U32 / max_coefficient) * max_coefficient < C::F::ORDER_U32`, // `height * max_coefficient` cannot overflow `C::F`. 
value: builder.constant(C::N::from_canonical_u32( C::F::ORDER_U32 / max_coefficient + 1, diff --git a/extensions/pairing/circuit/src/config.rs b/extensions/pairing/circuit/src/config.rs index 97bd242e32..4914fa433c 100644 --- a/extensions/pairing/circuit/src/config.rs +++ b/extensions/pairing/circuit/src/config.rs @@ -1,10 +1,6 @@ -use derive_more::derive::From; use openvm_algebra_circuit::*; -use openvm_circuit::arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, VmConfig, VmInventoryError, -}; -use openvm_circuit_derive::{AnyEnum, InstructionExecutor, VmConfig}; -use openvm_circuit_primitives_derive::{Chip, ChipUsageGetter}; +use openvm_circuit::arch::SystemConfig; +use openvm_circuit_derive::VmConfig; use openvm_ecc_circuit::*; use openvm_rv32im_circuit::*; use openvm_stark_backend::p3_field::PrimeField32; diff --git a/extensions/pairing/circuit/src/fp12_chip/tests.rs b/extensions/pairing/circuit/src/fp12_chip/tests.rs index 8d1dbc2293..a9f6b235d5 100644 --- a/extensions/pairing/circuit/src/fp12_chip/tests.rs +++ b/extensions/pairing/circuit/src/fp12_chip/tests.rs @@ -237,7 +237,8 @@ fn test_fp12_sub_bls12381() { ); } -// NOTE[yj]: This test requires RUST_MIN_STACK=8388608 to run without overflowing the stack, so it is ignored by the test runner for now +// NOTE[yj]: This test requires RUST_MIN_STACK=8388608 to run without overflowing the stack, so it +// is ignored by the test runner for now #[test] #[ignore] fn test_fp12_mul_bls12381() { diff --git a/extensions/pairing/circuit/src/pairing_chip/miller_double_and_add_step.rs b/extensions/pairing/circuit/src/pairing_chip/miller_double_and_add_step.rs index de98b4e18f..77084428c9 100644 --- a/extensions/pairing/circuit/src/pairing_chip/miller_double_and_add_step.rs +++ b/extensions/pairing/circuit/src/pairing_chip/miller_double_and_add_step.rs @@ -19,7 +19,8 @@ use openvm_rv32_adapters::Rv32VecHeapAdapterChip; use openvm_stark_backend::p3_field::PrimeField32; // Input: two AffinePoint: 4 field 
elements each -// Output: (AffinePoint, UnevaluatedLine, UnevaluatedLine) -> 2*2 + 2*2 + 2*2 = 12 field elements +// Output: (AffinePoint, UnevaluatedLine, UnevaluatedLine) -> 2*2 + 2*2 + 2*2 = 12 +// field elements #[derive(Chip, ChipUsageGetter, InstructionExecutor)] pub struct MillerDoubleAndAddStepChip< F: PrimeField32, diff --git a/extensions/pairing/guest/src/bls12_381/mod.rs b/extensions/pairing/guest/src/bls12_381/mod.rs index 707d3ba73a..a23d35d507 100644 --- a/extensions/pairing/guest/src/bls12_381/mod.rs +++ b/extensions/pairing/guest/src/bls12_381/mod.rs @@ -158,8 +158,8 @@ impl PairingIntrinsics for Bls12_381 { const FP2_TWO: Fp2 = Fp2::new(Fp::from_const_u8(2), Fp::from_const_u8(0)); const FP2_THREE: Fp2 = Fp2::new(Fp::from_const_u8(3), Fp::from_const_u8(0)); - // Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers 0..12 - // FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) + // Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers + // 0..12 FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) // These are validated against `halo2curves::bls12_381::FROBENIUS_COEFF_FQ12_C1` in tests.rs const FROBENIUS_COEFFS: [[Self::Fp2; 5]; 12] = [ [ diff --git a/extensions/pairing/guest/src/bls12_381/pairing.rs b/extensions/pairing/guest/src/bls12_381/pairing.rs index f9a022d0cf..9cd7ade4a5 100644 --- a/extensions/pairing/guest/src/bls12_381/pairing.rs +++ b/extensions/pairing/guest/src/bls12_381/pairing.rs @@ -74,8 +74,8 @@ impl LineMulMType for Bls12_381 { /// Multiplies a line in 02345-form with a Fp12 element to get an Fp12 element fn mul_by_023(f: &Fp12, l: &EvaluatedLine) -> Fp12 { - // this is only used if the number of lines is odd, which doesn't happen for our applications - // right now, so we can use this suboptimal implementation + // this is only used if the number of lines is odd, which doesn't happen for our + // applications 
right now, so we can use this suboptimal implementation Fp12::from_evaluated_line_m_type(l.clone()) * f } @@ -110,9 +110,9 @@ impl LineMulMType for Bls12_381 { let s4 = &self_coeffs[3]; let s5 = &self_coeffs[5]; - // NOTE[yj]: Hand-calculated multiplication for Fp12 * 02345 ∈ Fp2; this is likely not the most efficient implementation - // c00 = cs0co0 + xi(cs1co2 + cs2co1 + cs3co5 + cs4co4) - // c01 = cs0co1 + cs1co0 + xi(cs2co2 + cs4co5 + cs5co4) + // NOTE[yj]: Hand-calculated multiplication for Fp12 * 02345 ∈ Fp2; this is likely not the + // most efficient implementation c00 = cs0co0 + xi(cs1co2 + cs2co1 + cs3co5 + + // cs4co4) c01 = cs0co1 + cs1co0 + xi(cs2co2 + cs4co5 + cs5co4) // c02 = cs0co2 + cs1co1 + cs2co0 + cs3co4 + xi(cs5co5) // c10 = cs3co0 + xi(cs1co5 + cs2co4 + cs4co2 + cs5co1) // c11 = cs0co4 + cs3co1 + cs4co0 + xi(cs2co5 + cs5co2) @@ -154,7 +154,8 @@ impl MultiMillerLoop for Bls12_381 { f } - /// The expected output of this function when running the Miller loop with embedded exponent is c^3 * l_{3Q} + /// The expected output of this function when running the Miller loop with embedded exponent is + /// c^3 * l_{3Q} fn pre_loop( Q_acc: Vec>, Q: &[AffinePoint], @@ -162,8 +163,9 @@ impl MultiMillerLoop for Bls12_381 { xy_fracs: &[(Self::Fp, Self::Fp)], ) -> (Self::Fp12, Vec>) { let mut f = if let Some(mut c) = c { - // for the miller loop with embedded exponent, f will be set to c at the beginning of the function, and we - // will multiply by c again due to the last two values of the pseudo-binary encoding (BLS12_381_PSEUDO_BINARY_ENCODING) being 1. + // for the miller loop with embedded exponent, f will be set to c at the beginning of + // the function, and we will multiply by c again due to the last two values + // of the pseudo-binary encoding (BLS12_381_PSEUDO_BINARY_ENCODING) being 1. // Therefore, the final value of f at the end of this block is c^3. 
let mut c3 = c.clone(); c.square_assign(); @@ -176,8 +178,9 @@ impl MultiMillerLoop for Bls12_381 { let mut Q_acc = Q_acc; // Special case the first iteration of the Miller loop with pseudo_binary_encoding = 1: - // this means that the first step is a double and add, but we need to separate the two steps since the optimized - // `miller_double_and_add_step` will fail because Q_acc is equal to Q_signed on the first iteration + // this means that the first step is a double and add, but we need to separate the two steps + // since the optimized `miller_double_and_add_step` will fail because Q_acc is equal + // to Q_signed on the first iteration let (Q_out_double, lines_2S) = Q_acc .into_iter() .map(|Q| Self::miller_double_step(&Q)) @@ -210,7 +213,8 @@ impl MultiMillerLoop for Bls12_381 { (f, Q_acc) } - /// After running the main body of the Miller loop, we conjugate f due to the curve seed x being negative. + /// After running the main body of the Miller loop, we conjugate f due to the curve seed x being + /// negative. fn post_loop( f: &Self::Fp12, Q_acc: Vec>, @@ -272,7 +276,8 @@ impl PairingCheck for Bls12_381 { #[cfg(target_os = "zkvm")] { let hint = MaybeUninit::<(Fp12, Fp12)>::uninit(); - // We do not rely on the slice P's memory layout since rust does not guarantee it across compiler versions. + // We do not rely on the slice P's memory layout since rust does not guarantee it across + // compiler versions. let p_fat_ptr = (P.as_ptr() as u32, P.len() as u32); let q_fat_ptr = (Q.as_ptr() as u32, Q.len() as u32); unsafe { @@ -304,8 +309,8 @@ impl PairingCheck for Bls12_381 { #[allow(non_snake_case)] impl Bls12_381 { - // The paper only describes the implementation for Bn254, so we use the gnark implementation for Bls12_381. - // Adapted from the gnark implementation: + // The paper only describes the implementation for Bn254, so we use the gnark implementation for + // Bls12_381. 
Adapted from the gnark implementation: // https://github.com/Consensys/gnark/blob/af754dd1c47a92be375930ae1abfbd134c5310d8/std/algebra/emulated/fields_bls12381/e12_pairing.go#L394C1-L395C1 fn try_honest_pairing_check( P: &[AffinePoint<::Fp>], @@ -315,13 +320,14 @@ impl Bls12_381 { // The gnark implementation checks that f * s = c^{q - x} where x is the curve seed. // We check an equivalent condition: f * c^x * s = c^q. - // This is because we can compute f * c^x by embedding the c^x computation in the miller loop. + // This is because we can compute f * c^x by embedding the c^x computation in the miller + // loop. // We compute c^q before c is consumed by conjugate() below let c_q = FieldExtension::frobenius_map(&c, 1); - // Since the Bls12_381 curve has a negative seed, the miller loop for Bls12_381 is computed as - // f_{Miller,x,Q}(P) = conjugate( f_{Miller,-x,Q}(P) * c^{-x} ). + // Since the Bls12_381 curve has a negative seed, the miller loop for Bls12_381 is computed + // as f_{Miller,x,Q}(P) = conjugate( f_{Miller,-x,Q}(P) * c^{-x} ). 
// We will pass in the conjugate inverse of c into the miller loop so that we compute // fc = conjugate( f_{Miller,-x,Q}(P) * c'^{-x} ) (where c' is the conjugate inverse of c) // = f_{Miller,x,Q}(P) * c^x diff --git a/extensions/pairing/guest/src/bn254/mod.rs b/extensions/pairing/guest/src/bn254/mod.rs index 1c7bfaa2cd..7a620e74e9 100644 --- a/extensions/pairing/guest/src/bn254/mod.rs +++ b/extensions/pairing/guest/src/bn254/mod.rs @@ -227,8 +227,8 @@ impl PairingIntrinsics for Bn254 { const XI: Fp2 = Fp2::new(Fp::from_const_u8(9), Fp::from_const_u8(1)); const FP2_TWO: Fp2 = Fp2::new(Fp::from_const_u8(2), Fp::from_const_u8(0)); const FP2_THREE: Fp2 = Fp2::new(Fp::from_const_u8(3), Fp::from_const_u8(0)); - // Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers 0..12 - // FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) + // Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers + // 0..12 FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) // These are validated against `halo2curves::bn256::FROBENIUS_COEFF_FQ12_C1` in tests.rs // (Note that bn256 here is another name for bn254) const FROBENIUS_COEFFS: [[Self::Fp2; 5]; 12] = [ diff --git a/extensions/pairing/guest/src/bn254/pairing.rs b/extensions/pairing/guest/src/bn254/pairing.rs index 34abd8da7b..25a0d6b7fe 100644 --- a/extensions/pairing/guest/src/bn254/pairing.rs +++ b/extensions/pairing/guest/src/bn254/pairing.rs @@ -107,9 +107,9 @@ impl LineMulDType for Bn254 { let s4 = &self_coeffs[3]; let s5 = &self_coeffs[5]; - // NOTE[yj]: Hand-calculated multiplication for Fp12 * 01234 ∈ Fp2; this is likely not the most efficient implementation - // c00 = cs0co0 + xi(cs1co2 + cs2co1 + cs4co4 + cs5co3) - // c01 = cs0co1 + cs1co0 + cs3co3 + xi(cs2co2 + cs5co4) + // NOTE[yj]: Hand-calculated multiplication for Fp12 * 01234 ∈ Fp2; this is likely not the + // most efficient implementation c00 = cs0co0 + 
xi(cs1co2 + cs2co1 + cs4co4 + + // cs5co3) c01 = cs0co1 + cs1co0 + cs3co3 + xi(cs2co2 + cs5co4) // c02 = cs0co2 + cs1co1 + cs2co0 + cs3co4 + cs4co3 // c10 = cs0co3 + cs3co0 + xi(cs2co4 + cs4co2 + cs5co1) // c11 = cs0co4 + cs1co3 + cs3co1 + cs4co0 + xi(cs5co2) @@ -150,7 +150,8 @@ impl MultiMillerLoop for Bn254 { f } - /// The expected output of this function when running the Miller loop with embedded exponent is c^2 * l_{2Q} + /// The expected output of this function when running the Miller loop with embedded exponent is + /// c^2 * l_{2Q} fn pre_loop( Q_acc: Vec>, _Q: &[AffinePoint], @@ -158,8 +159,9 @@ impl MultiMillerLoop for Bn254 { xy_fracs: &[(Self::Fp, Self::Fp)], ) -> (Self::Fp12, Vec>) { let mut f = if let Some(mut c) = c { - // for the miller loop with embedded exponent, f will be set to c at the beginning of the function, and we - // will square c due to the last two values of the pseudo-binary encoding (BN254_PSEUDO_BINARY_ENCODING) being 0 and 1. + // for the miller loop with embedded exponent, f will be set to c at the beginning of + // the function, and we will square c due to the last two values of the + // pseudo-binary encoding (BN254_PSEUDO_BINARY_ENCODING) being 0 and 1. // Therefore, the final value of f at the end of this block is c^2. c.square_assign(); c @@ -170,8 +172,9 @@ impl MultiMillerLoop for Bn254 { let mut Q_acc = Q_acc; let mut initial_lines = Vec::>::new(); - // We don't need to special case the first iteration for Bn254, but since we are using the same Miller loop implementation - // for both Bn254 and Bls12_381, we need to do the first iteration separately here. + // We don't need to special case the first iteration for Bn254, but since we are using the + // same Miller loop implementation for both Bn254 and Bls12_381, we need to do the + // first iteration separately here. 
let (Q_out_double, lines_2S) = Q_acc .into_iter() .map(|Q| Self::miller_double_step(&Q)) @@ -233,13 +236,15 @@ impl MultiMillerLoop for Bn254 { let q2_vec = Q .iter() .map(|Q| { - // There is a frobenius mapping π²(Q) that we skip here since it is equivalent to the identity mapping + // There is a frobenius mapping π²(Q) that we skip here since it is equivalent to + // the identity mapping let x = &Q.x * x_to_q_sq_minus_1_over_3; AffinePoint { x, y: Q.y.clone() } }) .collect::>(); - // compute l_{(6x+2)\Psi(Q) + \phi_p(\Psi(Q)), -(\phi_p)^2(\Psi(Q))} where \phi_p is the Frobenius map + // compute l_{(6x+2)\Psi(Q) + \phi_p(\Psi(Q)), -(\phi_p)^2(\Psi(Q))} where \phi_p is the + // Frobenius map let (Q_out_add, lines_S_plus_Q) = Q_acc .iter() .zip(q2_vec.iter()) @@ -306,7 +311,8 @@ impl PairingCheck for Bn254 { #[cfg(target_os = "zkvm")] { let hint = MaybeUninit::<(Fp12, Fp12)>::uninit(); - // We do not rely on the slice P's memory layout since rust does not guarantee it across compiler versions. + // We do not rely on the slice P's memory layout since rust does not guarantee it across + // compiler versions. let p_fat_ptr = (P.as_ptr() as u32, P.len() as u32); let q_fat_ptr = (Q.as_ptr() as u32, Q.len() as u32); unsafe { @@ -352,7 +358,8 @@ impl Bn254 { // By the theorem, it suffices to provide c and u such that f * u == c^λ. // Since λ = 6x + 2 + q^3 - q^2 + q, we will check the equivalent condition: // f * c^-{6x + 2} * u * c^-{q^3 - q^2 + q} == 1 - // This is because we can compute f * c^-{6x+2} by embedding the c^-{6x+2} computation in the miller loop. + // This is because we can compute f * c^-{6x+2} by embedding the c^-{6x+2} computation in + // the miller loop. 
// c_mul = c^-{q^3 - q^2 + q} let c_q3_inv = FieldExtension::frobenius_map(&c_inv, 3); diff --git a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/final_exp.rs b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/final_exp.rs index d89cc1f58c..b6bdf1221c 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/final_exp.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/final_exp.rs @@ -8,7 +8,8 @@ use openvm_ecc_guest::{ use super::{Bls12_381, FINAL_EXP_FACTOR, LAMBDA, POLY_FACTOR}; use crate::pairing::{FinalExp, MultiMillerLoop}; -// The paper only describes the implementation for Bn254, so we use the gnark implementation for Bls12_381. +// The paper only describes the implementation for Bn254, so we use the gnark implementation for +// Bls12_381. #[allow(non_snake_case)] impl FinalExp for Bls12_381 { type Fp = Fq; @@ -26,10 +27,11 @@ impl FinalExp for Bls12_381 { // The gnark implementation checks that f * s = c^{q - x} where x is the curve seed. // We check an equivalent condition: f * c^x * c^-q * s = 1. - // This is because we can compute f * c^x by embedding the c^x computation in the miller loop. + // This is because we can compute f * c^x by embedding the c^x computation in the miller + // loop. - // Since the Bls12_381 curve has a negative seed, the miller loop for Bls12_381 is computed as - // f_{Miller,x,Q}(P) = conjugate( f_{Miller,-x,Q}(P) * c^{-x} ). + // Since the Bls12_381 curve has a negative seed, the miller loop for Bls12_381 is computed + // as f_{Miller,x,Q}(P) = conjugate( f_{Miller,-x,Q}(P) * c^{-x} ). 
// We will pass in the conjugate inverse of c into the miller loop so that we compute // fc = f_{Miller,x,Q}(P) // = conjugate( f_{Miller,-x,Q}(P) * c'^{-x} ) (where c' is the conjugate inverse of c) @@ -66,7 +68,8 @@ impl FinalExp for Bls12_381 { root = f.exp_bytes(true, &exp.to_bytes_be()); let three_be = three.to_bytes_be(); - // NOTE[yj]: we can probably remove this first check as an optimization since we initizlize order_3rd_power to 0 + // NOTE[yj]: we can probably remove this first check as an optimization since we initizlize + // order_3rd_power to 0 if root == Fq12::ONE { order_3rd_power = 0; } diff --git a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/miller_loop.rs b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/miller_loop.rs index fa8d0bf767..add12a4b23 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/miller_loop.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/miller_loop.rs @@ -148,7 +148,8 @@ impl MultiMillerLoop for Bls12_381 { f } - /// The expected output of this function when running the Miller loop with embedded exponent is c^3 * l_{3Q} + /// The expected output of this function when running the Miller loop with embedded exponent is + /// c^3 * l_{3Q} fn pre_loop( Q_acc: Vec>, Q: &[AffinePoint], @@ -156,9 +157,10 @@ impl MultiMillerLoop for Bls12_381 { xy_fracs: &[(Fq, Fq)], ) -> (Fq12, Vec>) { let mut f = if let Some(mut c) = c { - // for the miller loop with embedded exponent, f will be set to c at the beginning of the function, and we - // will multiply by c again due to the last two values of the pseudo-binary encoding (BN12_381_PBE) being 1. - // Therefore, the final value of f at the end of this block is c^3. + // for the miller loop with embedded exponent, f will be set to c at the beginning of + // the function, and we will multiply by c again due to the last two values + // of the pseudo-binary encoding (BN12_381_PBE) being 1. 
Therefore, the + // final value of f at the end of this block is c^3. let mut c3 = c; c.square_assign(); c3 *= &c; @@ -170,8 +172,9 @@ impl MultiMillerLoop for Bls12_381 { let mut Q_acc = Q_acc; // Special case the first iteration of the Miller loop with pseudo_binary_encoding = 1: - // this means that the first step is a double and add, but we need to separate the two steps since the optimized - // `miller_double_and_add_step` will fail because Q_acc is equal to Q_signed on the first iteration + // this means that the first step is a double and add, but we need to separate the two steps + // since the optimized `miller_double_and_add_step` will fail because Q_acc is equal + // to Q_signed on the first iteration let (Q_out_double, lines_2S) = Q_acc .into_iter() .map(|Q| Self::miller_double_step(&Q)) @@ -204,7 +207,8 @@ impl MultiMillerLoop for Bls12_381 { (f, Q_acc) } - /// After running the main body of the Miller loop, we conjugate f due to the curve seed x being negative. + /// After running the main body of the Miller loop, we conjugate f due to the curve seed x being + /// negative. fn post_loop( f: &Fq12, Q_acc: Vec>, @@ -214,10 +218,11 @@ impl MultiMillerLoop for Bls12_381 { ) -> (Fq12, Vec>) { // Conjugate for negative component of the seed // Explanation: - // The general Miller loop formula implies that f_{-x} = 1/f_x. To avoid an inversion, we use the fact that - // for the final exponentiation, we only need the Miller loop result up to multiplication by some proper subfield - // of Fp12. Using the fact that Fp12 is a quadratic extension of Fp6, we have that f_x * conjugate(f_x) * 1/f_x lies in Fp6. - // Therefore we conjugate f_x instead of taking the inverse. + // The general Miller loop formula implies that f_{-x} = 1/f_x. To avoid an inversion, we + // use the fact that for the final exponentiation, we only need the Miller loop + // result up to multiplication by some proper subfield of Fp12. 
Using the fact that + // Fp12 is a quadratic extension of Fp6, we have that f_x * conjugate(f_x) * 1/f_x lies in + // Fp6. Therefore we conjugate f_x instead of taking the inverse. let f = f.conjugate(); (f, Q_acc) } diff --git a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/tests/test_miller_loop.rs b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/tests/test_miller_loop.rs index cd1f1db363..28e31e5878 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bls12_381/tests/test_miller_loop.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bls12_381/tests/test_miller_loop.rs @@ -81,7 +81,8 @@ fn test_f_mul() { Q_acc = Q_acc_init; - // Now Q_acc is in a state where we can do a left vs right side test of double-and-add vs double then add: + // Now Q_acc is in a state where we can do a left vs right side test of double-and-add vs double + // then add: // Left side test: Double and add let (Q_acc_daa, l_S_plus_Q, l_S_plus_Q_plus_S) = diff --git a/extensions/pairing/guest/src/halo2curves_shims/bn254/final_exp.rs b/extensions/pairing/guest/src/halo2curves_shims/bn254/final_exp.rs index ae7d581063..f4808e08b6 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bn254/final_exp.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bn254/final_exp.rs @@ -25,7 +25,8 @@ impl FinalExp for Bn254 { // By the theorem, it suffices to provide c and u such that f * u == c^λ. // Since λ = 6x + 2 + q^3 - q^2 + q, we will check the equivalent condition: // f * c^-{6x + 2} * u * c^-{q^3 - q^2 + q} == 1 - // This is because we can compute f * c^-{6x+2} by embedding the c^-{6x+2} computation in the miller loop. + // This is because we can compute f * c^-{6x+2} by embedding the c^-{6x+2} computation in + // the miller loop. 
// c_mul = c^-{q^3 - q^2 + q} let c_q3_inv = FieldExtension::frobenius_map(&c_inv, 3); diff --git a/extensions/pairing/guest/src/halo2curves_shims/bn254/miller_loop.rs b/extensions/pairing/guest/src/halo2curves_shims/bn254/miller_loop.rs index 7e9386731f..9d4ff02557 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bn254/miller_loop.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bn254/miller_loop.rs @@ -220,7 +220,8 @@ impl MultiMillerLoop for Bn254 { let q2_vec = Q .iter() .map(|Q| { - // There is a frobenius mapping π²(Q) that we skip here since it is equivalent to the identity mapping + // There is a frobenius mapping π²(Q) that we skip here since it is equivalent to + // the identity mapping let x = Q.x * x_to_q_sq_minus_1_over_3; AffinePoint { x, y: Q.y } }) diff --git a/extensions/pairing/guest/src/halo2curves_shims/bn254/tests/test_final_exp.rs b/extensions/pairing/guest/src/halo2curves_shims/bn254/tests/test_final_exp.rs index f56ebc11fb..e00535b5bf 100644 --- a/extensions/pairing/guest/src/halo2curves_shims/bn254/tests/test_final_exp.rs +++ b/extensions/pairing/guest/src/halo2curves_shims/bn254/tests/test_final_exp.rs @@ -54,10 +54,10 @@ fn assert_final_exp_one(a: &[i32; N], b: &[i32; N]) { Bn254::assert_final_exp_is_one(&f, &P_ecpoints, &Q_ecpoints); } -/// Generates test points for N number of points for an elliptic curve pairing, where the inputs `a` and `b` are -/// scalars of generators in G1 and G2, respectively. Importantly, for every even index, the generator P point is -/// negated (reflected an the x-axis). Outputs the vectors of P and Q points as well as the corresponding P and Q -/// EcPoint structs. +/// Generates test points for N number of points for an elliptic curve pairing, where the inputs `a` +/// and `b` are scalars of generators in G1 and G2, respectively. Importantly, for every even index, +/// the generator P point is negated (reflected an the x-axis). 
Outputs the vectors of P and Q +/// points as well as the corresponding P and Q EcPoint structs. #[allow(non_snake_case)] #[allow(clippy::type_complexity)] pub fn generate_test_points_generator_scalar( diff --git a/extensions/pairing/guest/src/pairing/miller_loop.rs b/extensions/pairing/guest/src/pairing/miller_loop.rs index 5a93ab25a9..c1ddf747e2 100644 --- a/extensions/pairing/guest/src/pairing/miller_loop.rs +++ b/extensions/pairing/guest/src/pairing/miller_loop.rs @@ -14,8 +14,8 @@ use super::{Evaluatable, EvaluatedLine, MillerStep, UnevaluatedLine}; pub trait MultiMillerLoop: MillerStep where ::Fp2: Field + FieldExtension, - // these trait bounds are needed for `multi_miller_loop_embedded_exp`. It would be better to move into - // a macro so the trait stays clean + // these trait bounds are needed for `multi_miller_loop_embedded_exp`. It would be better to + // move into a macro so the trait stays clean UnevaluatedLine: Evaluatable, for<'a> &'a Self::Fp: DivUnsafe<&'a Self::Fp, Output = Self::Fp>, for<'a> &'a Self::Fp2: Neg, @@ -56,8 +56,8 @@ where Self::multi_miller_loop_embedded_exp(P, Q, None) } - /// Runs the multi-Miller loop with an embedded exponent, removing the need to calculate the residue witness - /// in the final exponentiation step + /// Runs the multi-Miller loop with an embedded exponent, removing the need to calculate the + /// residue witness in the final exponentiation step /// /// `c` is assumed nonzero. fn multi_miller_loop_embedded_exp( diff --git a/extensions/pairing/guest/src/pairing/miller_step.rs b/extensions/pairing/guest/src/pairing/miller_step.rs index 53105bb01f..52f48630f7 100644 --- a/extensions/pairing/guest/src/pairing/miller_step.rs +++ b/extensions/pairing/guest/src/pairing/miller_step.rs @@ -47,8 +47,9 @@ where /// Assumptions: /// - s is not point at infinity. /// - a in the curve equation is 0. - /// The case y = 0 does not happen as long as the curve satisfies that 0 = X^3 + b has no solutions in Fp2. 
- /// The curve G1Affine and twist G2Affine are both chosen for bn254, bls12_381 so that this never happens. + /// The case y = 0 does not happen as long as the curve satisfies that 0 = X^3 + b has no + /// solutions in Fp2. The curve G1Affine and twist G2Affine are both chosen for bn254, + /// bls12_381 so that this never happens. fn miller_double_step( s: &AffinePoint, ) -> (AffinePoint, UnevaluatedLine) { @@ -102,8 +103,8 @@ where } /// Miller double and add step (2S + Q implemented as S + Q + S for efficiency). - /// Returns 2S+Q, a line in Fp12 passing through S and Q, and a line in Fp12 passing through S+Q and S - /// Assumption: Q != +- S && (S+Q) != +-S, so that there is no division by zero. + /// Returns 2S+Q, a line in Fp12 passing through S and Q, and a line in Fp12 passing through S+Q + /// and S Assumption: Q != +- S && (S+Q) != +-S, so that there is no division by zero. /// The way this is used in miller loop, this is always satisfied. fn miller_double_and_add_step( s: &AffinePoint, diff --git a/extensions/pairing/guest/src/pairing/mod.rs b/extensions/pairing/guest/src/pairing/mod.rs index 52e27dd825..3a751025e4 100644 --- a/extensions/pairing/guest/src/pairing/mod.rs +++ b/extensions/pairing/guest/src/pairing/mod.rs @@ -29,8 +29,8 @@ pub trait PairingIntrinsics { const PAIRING_IDX: usize; /// The sextic extension `Fp12` is `Fp2[X] / (X^6 - \xi)`, where `\xi` is a non-residue. 
const XI: Self::Fp2; - /// Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers 0..12 - /// FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) + /// Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers + /// 0..12 FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) const FROBENIUS_COEFFS: [[Self::Fp2; 5]; 12]; const FP2_TWO: Self::Fp2; diff --git a/extensions/pairing/guest/src/pairing/sextic_ext_field.rs b/extensions/pairing/guest/src/pairing/sextic_ext_field.rs index 8924fe1378..7f75815e05 100644 --- a/extensions/pairing/guest/src/pairing/sextic_ext_field.rs +++ b/extensions/pairing/guest/src/pairing/sextic_ext_field.rs @@ -67,8 +67,8 @@ pub(crate) fn sextic_tower_mul( where for<'a> &'a F: core::ops::Mul<&'a F, Output = F>, { - // The following multiplication is hand-derived with respect to the basis where degree 6 extension - // is composed of degree 3 extension followed by degree 2 extension. + // The following multiplication is hand-derived with respect to the basis where degree 6 + // extension is composed of degree 3 extension followed by degree 2 extension. 
// c0 = cs0co0 + xi(cs1co2 + cs2co1 + cs3co5 + cs4co4 + cs5co3) // c1 = cs0co1 + cs1co0 + cs3co3 + xi(cs2co2 + cs4co5 + cs5co4) diff --git a/extensions/pairing/tests/programs/examples/pairing_check_fallback.rs b/extensions/pairing/tests/programs/examples/pairing_check_fallback.rs index db9a8ce87b..7f47709097 100644 --- a/extensions/pairing/tests/programs/examples/pairing_check_fallback.rs +++ b/extensions/pairing/tests/programs/examples/pairing_check_fallback.rs @@ -76,10 +76,10 @@ mod bn254 { // f * c^x * c^-q * s = 1, // where fc = f * c'^x (embedded Miller loop with c conjugate inverse), // and the curve seed x = -0xd201000000010000 - // the miller loop computation includes a conjugation at the end because the value of the - // seed is negative, so we need to conjugate the miller loop input c as c'. We then substitute - // y = -x to get c^-y and finally compute c'^-y as input to the miller loop: - // f * c'^-y * c^-q * s = 1 + // the miller loop computation includes a conjugation at the end because the value of + // the seed is negative, so we need to conjugate the miller loop input c + // as c'. We then substitute y = -x to get c^-y and finally compute c'^-y + // as input to the miller loop: f * c'^-y * c^-q * s = 1 let c_q = FieldExtension::frobenius_map(&c, 1); let c_conj = c.conjugate(); if c_conj == Fp12::ZERO { @@ -191,10 +191,10 @@ mod bls12_381 { // f * c^x * c^-q * s = 1, // where fc = f * c'^x (embedded Miller loop with c conjugate inverse), // and the curve seed x = -0xd201000000010000 - // the miller loop computation includes a conjugation at the end because the value of the - // seed is negative, so we need to conjugate the miller loop input c as c'. We then substitute - // y = -x to get c^-y and finally compute c'^-y as input to the miller loop: - // f * c'^-y * c^-q * s = 1 + // the miller loop computation includes a conjugation at the end because the value of + // the seed is negative, so we need to conjugate the miller loop input c + // as c'. 
We then substitute y = -x to get c^-y and finally compute c'^-y + // as input to the miller loop: f * c'^-y * c^-q * s = 1 let c_q = FieldExtension::frobenius_map(&c, 1); let c_conj = c.conjugate(); if c_conj == Fp12::ZERO { diff --git a/extensions/pairing/tests/programs/examples/pairing_miller_step.rs b/extensions/pairing/tests/programs/examples/pairing_miller_step.rs index 7901e263e4..c32aefc7c1 100644 --- a/extensions/pairing/tests/programs/examples/pairing_miller_step.rs +++ b/extensions/pairing/tests/programs/examples/pairing_miller_step.rs @@ -37,8 +37,8 @@ mod bn254 { let mut pt_bytes = [0u8; 32 * 4]; let mut l_bytes = [0u8; 32 * 4]; - // TODO: if we ever need to change this, we should switch to using `StdIn::write` to serialize - // for us and use `read()` instead of `read_vec()` + // TODO: if we ever need to change this, we should switch to using `StdIn::write` to + // serialize for us and use `read()` instead of `read_vec()` pt_bytes[0..32].copy_from_slice(pt_cmp.x.c0.as_le_bytes()); pt_bytes[32..2 * 32].copy_from_slice(pt_cmp.x.c1.as_le_bytes()); pt_bytes[2 * 32..3 * 32].copy_from_slice(pt_cmp.y.c0.as_le_bytes()); @@ -67,8 +67,8 @@ mod bn254 { let mut l0_bytes = [0u8; 32 * 4]; let mut l1_bytes = [0u8; 32 * 4]; - // TODO: if we ever need to change this, we should switch to using `StdIn::write` to serialize - // for us and use `read()` instead of `read_vec()` + // TODO: if we ever need to change this, we should switch to using `StdIn::write` to + // serialize for us and use `read()` instead of `read_vec()` pt_bytes[0..32].copy_from_slice(pt_cmp.x.c0.as_le_bytes()); pt_bytes[32..2 * 32].copy_from_slice(pt_cmp.x.c1.as_le_bytes()); pt_bytes[2 * 32..3 * 32].copy_from_slice(pt_cmp.y.c0.as_le_bytes()); diff --git a/extensions/pairing/transpiler/src/lib.rs b/extensions/pairing/transpiler/src/lib.rs index 628a779fb0..7777c37c91 100644 --- a/extensions/pairing/transpiler/src/lib.rs +++ b/extensions/pairing/transpiler/src/lib.rs @@ -71,7 +71,9 @@ impl LocalOpcode 
for Bls12381Fp12Opcode { #[repr(u16)] pub enum PairingPhantom { /// Uses `b` to determine the curve: `b` is the discriminant of `PairingCurve` kind. - /// Peeks at `[r32{0}(a)..r32{0}(a) + Fp::NUM_LIMBS * 12]_2` to get `f: Fp12` and then resets the hint stream to equal `final_exp_hint(f) = (residue_witness, scaling_factor): (Fp12, Fp12)` as `Fp::NUM_LIMBS * 12 * 2` bytes. + /// Peeks at `[r32{0}(a)..r32{0}(a) + Fp::NUM_LIMBS * 12]_2` to get `f: Fp12` and then resets + /// the hint stream to equal `final_exp_hint(f) = (residue_witness, scaling_factor): (Fp12, + /// Fp12)` as `Fp::NUM_LIMBS * 12 * 2` bytes. HintFinalExp = 0x30, } diff --git a/extensions/rv32-adapters/src/eq_mod.rs b/extensions/rv32-adapters/src/eq_mod.rs index 508d7eb80a..ab80481f19 100644 --- a/extensions/rv32-adapters/src/eq_mod.rs +++ b/extensions/rv32-adapters/src/eq_mod.rs @@ -41,11 +41,10 @@ use serde_big_array::BigArray; use serde_with::serde_as; /// This adapter reads from NUM_READS <= 2 pointers and writes to a register. -/// * The data is read from the heap (address space 2), and the pointers -/// are read from registers (address space 1). -/// * Reads take the form of `BLOCKS_PER_READ` consecutive reads of size -/// `BLOCK_SIZE` from the heap, starting from the addresses in `rs[0]` -/// (and `rs[1]` if `R = 2`). +/// * The data is read from the heap (address space 2), and the pointers are read from registers +/// (address space 1). +/// * Reads take the form of `BLOCKS_PER_READ` consecutive reads of size `BLOCK_SIZE` from the heap, +/// starting from the addresses in `rs[0]` (and `rs[1]` if `R = 2`). /// * Writes are to 32-bit register rd. 
#[repr(C)] #[derive(AlignedBorrow)] diff --git a/extensions/rv32-adapters/src/heap.rs b/extensions/rv32-adapters/src/heap.rs index 839d69bd07..cd9f93abbc 100644 --- a/extensions/rv32-adapters/src/heap.rs +++ b/extensions/rv32-adapters/src/heap.rs @@ -36,8 +36,8 @@ use super::{ }; /// This adapter reads from NUM_READS <= 2 pointers and writes to 1 pointer. -/// * The data is read from the heap (address space 2), and the pointers -/// are read from registers (address space 1). +/// * The data is read from the heap (address space 2), and the pointers are read from registers +/// (address space 1). /// * Reads are from the addresses in `rs[0]` (and `rs[1]` if `R = 2`). /// * Writes are to the address in `rd`. diff --git a/extensions/rv32-adapters/src/heap_branch.rs b/extensions/rv32-adapters/src/heap_branch.rs index 9148b2d90c..29c9a151c9 100644 --- a/extensions/rv32-adapters/src/heap_branch.rs +++ b/extensions/rv32-adapters/src/heap_branch.rs @@ -41,8 +41,8 @@ use serde::{Deserialize, Serialize}; use serde_big_array::BigArray; /// This adapter reads from NUM_READS <= 2 pointers. -/// * The data is read from the heap (address space 2), and the pointers -/// are read from registers (address space 1). +/// * The data is read from the heap (address space 2), and the pointers are read from registers +/// (address space 1). /// * Reads are from the addresses in `rs[0]` (and `rs[1]` if `R = 2`). #[repr(C)] #[derive(AlignedBorrow)] @@ -101,16 +101,19 @@ impl VmA .eval(builder, ctx.instruction.is_valid.clone()); } - // We constrain the highest limbs of heap pointers to be less than 2^(addr_bits - (RV32_CELL_BITS * (RV32_REGISTER_NUM_LIMBS - 1))). - // This ensures that no overflow occurs when computing memory pointers. Since the number of cells accessed with each address - // will be small enough, and combined with the memory argument, it ensures that all the cells accessed in the memory are less than 2^addr_bits. 
+ // We constrain the highest limbs of heap pointers to be less than 2^(addr_bits - + // (RV32_CELL_BITS * (RV32_REGISTER_NUM_LIMBS - 1))). This ensures that no overflow + // occurs when computing memory pointers. Since the number of cells accessed with each + // address will be small enough, and combined with the memory argument, it ensures + // that all the cells accessed in the memory are less than 2^addr_bits. let need_range_check: Vec = cols .rs_val .iter() .map(|val| val[RV32_REGISTER_NUM_LIMBS - 1]) .collect(); - // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain the correct amount of bits + // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain + // the correct amount of bits let limb_shift = AB::F::from_canonical_usize( 1 << (RV32_CELL_BITS * RV32_REGISTER_NUM_LIMBS - self.address_bits), ); diff --git a/extensions/rv32-adapters/src/vec_heap.rs b/extensions/rv32-adapters/src/vec_heap.rs index a7fc3294ee..39d9ea2941 100644 --- a/extensions/rv32-adapters/src/vec_heap.rs +++ b/extensions/rv32-adapters/src/vec_heap.rs @@ -40,13 +40,12 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; /// This adapter reads from R (R <= 2) pointers and writes to 1 pointer. -/// * The data is read from the heap (address space 2), and the pointers -/// are read from registers (address space 1). -/// * Reads take the form of `BLOCKS_PER_READ` consecutive reads of size -/// `READ_SIZE` from the heap, starting from the addresses in `rs[0]` -/// (and `rs[1]` if `R = 2`). -/// * Writes take the form of `BLOCKS_PER_WRITE` consecutive writes of -/// size `WRITE_SIZE` to the heap, starting from the address in `rd`. +/// * The data is read from the heap (address space 2), and the pointers are read from registers +/// (address space 1). 
+/// * Reads take the form of `BLOCKS_PER_READ` consecutive reads of size `READ_SIZE` from the heap, +/// starting from the addresses in `rs[0]` (and `rs[1]` if `R = 2`). +/// * Writes take the form of `BLOCKS_PER_WRITE` consecutive writes of size `WRITE_SIZE` to the +/// heap, starting from the address in `rd`. #[derive(Clone)] pub struct Rv32VecHeapAdapterChip< F: Field, @@ -247,9 +246,11 @@ impl< .eval(builder, ctx.instruction.is_valid.clone()); } - // We constrain the highest limbs of heap pointers to be less than 2^(addr_bits - (RV32_CELL_BITS * (RV32_REGISTER_NUM_LIMBS - 1))). - // This ensures that no overflow occurs when computing memory pointers. Since the number of cells accessed with each address - // will be small enough, and combined with the memory argument, it ensures that all the cells accessed in the memory are less than 2^addr_bits. + // We constrain the highest limbs of heap pointers to be less than 2^(addr_bits - + // (RV32_CELL_BITS * (RV32_REGISTER_NUM_LIMBS - 1))). This ensures that no overflow + // occurs when computing memory pointers. Since the number of cells accessed with each + // address will be small enough, and combined with the memory argument, it ensures + // that all the cells accessed in the memory are less than 2^addr_bits. 
let need_range_check: Vec = cols .rs_val .iter() @@ -257,7 +258,8 @@ impl< .map(|val| val[RV32_REGISTER_NUM_LIMBS - 1]) .collect(); - // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain the correct amount of bits + // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain + // the correct amount of bits let limb_shift = AB::F::from_canonical_usize( 1 << (RV32_CELL_BITS * RV32_REGISTER_NUM_LIMBS - self.address_bits), ); diff --git a/extensions/rv32-adapters/src/vec_heap_two_reads.rs b/extensions/rv32-adapters/src/vec_heap_two_reads.rs index 0483df38ba..f829db8bbc 100644 --- a/extensions/rv32-adapters/src/vec_heap_two_reads.rs +++ b/extensions/rv32-adapters/src/vec_heap_two_reads.rs @@ -40,13 +40,13 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; /// This adapter reads from 2 pointers and writes to 1 pointer. -/// * The data is read from the heap (address space 2), and the pointers -/// are read from registers (address space 1). -/// * Reads take the form of `BLOCKS_PER_READX` consecutive reads of size -/// `READ_SIZE` from the heap, starting from the addresses in `rs[X]` +/// * The data is read from the heap (address space 2), and the pointers are read from registers +/// (address space 1). +/// * Reads take the form of `BLOCKS_PER_READX` consecutive reads of size `READ_SIZE` from the heap, +/// starting from the addresses in `rs[X]` /// * NOTE that the two reads can read different numbers of blocks. -/// * Writes take the form of `BLOCKS_PER_WRITE` consecutive writes of -/// size `WRITE_SIZE` to the heap, starting from the address in `rd`. +/// * Writes take the form of `BLOCKS_PER_WRITE` consecutive writes of size `WRITE_SIZE` to the +/// heap, starting from the address in `rd`. 
pub struct Rv32VecHeapTwoReadsAdapterChip< F: Field, const BLOCKS_PER_READ1: usize, @@ -279,7 +279,8 @@ impl< let need_range_check = [&cols.rs1_val, &cols.rs2_val, &cols.rd_val, &cols.rd_val] .map(|val| val[RV32_REGISTER_NUM_LIMBS - 1]); - // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain the correct amount of bits + // range checks constrain to RV32_CELL_BITS bits, so we need to shift the limbs to constrain + // the correct amount of bits let limb_shift = AB::F::from_canonical_usize( 1 << (RV32_CELL_BITS * RV32_REGISTER_NUM_LIMBS - self.address_bits), ); diff --git a/extensions/rv32im/circuit/Cargo.toml b/extensions/rv32im/circuit/Cargo.toml index 9bc8ab5418..8b20385104 100644 --- a/extensions/rv32im/circuit/Cargo.toml +++ b/extensions/rv32im/circuit/Cargo.toml @@ -32,7 +32,7 @@ openvm-stark-sdk = { workspace = true } openvm-circuit = { workspace = true, features = ["test-utils"] } [features] -default = ["parallel", "mimalloc"] +default = ["parallel", "jemalloc"] parallel = ["openvm-circuit/parallel"] test-utils = ["openvm-circuit/test-utils", "dep:openvm-stark-sdk"] # performance features: diff --git a/extensions/rv32im/circuit/src/adapters/loadstore.rs b/extensions/rv32im/circuit/src/adapters/loadstore.rs index e4af5527e0..b92680a0c7 100644 --- a/extensions/rv32im/circuit/src/adapters/loadstore.rs +++ b/extensions/rv32im/circuit/src/adapters/loadstore.rs @@ -47,7 +47,8 @@ use crate::adapters::RV32_CELL_BITS; /// 2 byte aligned lh, lhu, sh instructions and /// 1 byte aligned lb, lbu, sb instructions /// This adapter always batch reads/writes 4 bytes, -/// thus it needs to shift left the memory pointer by some amount in case of not 4 byte aligned intermediate pointers +/// thus it needs to shift left the memory pointer by some amount in case of not 4 byte aligned +/// intermediate pointers pub struct LoadStoreInstruction { /// is_valid is constrained to be bool pub is_valid: T, @@ -56,8 +57,8 @@ pub struct 
LoadStoreInstruction { /// is_load is constrained to be bool, and can only be 1 if is_valid is 1 pub is_load: T, - /// Keeping two separate shift amounts is needed for getting the read_ptr/write_ptr with degree 2 - /// load_shift_amount will be the shift amount if load and 0 if store + /// Keeping two separate shift amounts is needed for getting the read_ptr/write_ptr with degree + /// 2 load_shift_amount will be the shift amount if load and 0 if store pub load_shift_amount: T, /// store_shift_amount will be 0 if load and the shift amount if store pub store_shift_amount: T, @@ -70,8 +71,9 @@ pub struct LoadStoreInstruction { /// This method ensures that there are no modifications to the global interfaces. /// /// Here 2 reads represent read_data and prev_data, -/// The second element of the tuple in Reads is the shift amount needed to be passed to the core chip -/// Getting the intermediate pointer is completely internal to the adapter and shouldn't be a part of the AdapterInterface +/// The second element of the tuple in Reads is the shift amount needed to be passed to the core +/// chip Getting the intermediate pointer is completely internal to the adapter and shouldn't be a +/// part of the AdapterInterface pub struct Rv32LoadStoreAdapterRuntimeInterface(PhantomData); impl VmAdapterInterface for Rv32LoadStoreAdapterRuntimeInterface { type Reads = ([[T; RV32_REGISTER_NUM_LIMBS]; 2], T); @@ -126,7 +128,8 @@ impl Rv32LoadStoreAdapterChip { #[serde(bound = "F: Field")] pub struct Rv32LoadStoreReadRecord { pub rs1_record: RecordId, - /// This will be a read from a register in case of Stores and a read from RISC-V memory in case of Loads. + /// This will be a read from a register in case of Stores and a read from RISC-V memory in case + /// of Loads. 
pub read: RecordId, pub rs1_ptr: F, pub imm: F, @@ -140,8 +143,9 @@ pub struct Rv32LoadStoreReadRecord { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "F: Field")] pub struct Rv32LoadStoreWriteRecord { - /// This will be a write to a register in case of Load and a write to RISC-V memory in case of Stores. - /// For better struct packing, `RecordId(usize::MAX)` is used to indicate that there is no write. + /// This will be a write to a register in case of Load and a write to RISC-V memory in case of + /// Stores. For better struct packing, `RecordId(usize::MAX)` is used to indicate that + /// there is no write. pub write_id: RecordId, pub from_state: ExecutionState, pub rd_rs2_ptr: F, @@ -218,7 +222,8 @@ impl VmAdapterAir for Rv32LoadStoreAdapterAir { builder.assert_bool(write_count); builder.when(write_count).assert_one(is_valid.clone()); - // Constrain that if `is_valid == 1` and `write_count == 0`, then `is_load == 1` and `rd_rs2_ptr == x0` + // Constrain that if `is_valid == 1` and `write_count == 0`, then `is_load == 1` and + // `rd_rs2_ptr == x0` builder .when(is_valid.clone() - write_count) .assert_one(is_load.clone()); @@ -293,9 +298,10 @@ impl VmAdapterAir for Rv32LoadStoreAdapterAir { ); // read_ptr is mem_ptr for loads and rd_rs2_ptr for stores - // Note: shift_amount is expected to have degree 2, thus we can't put it in the select clause - // since the resulting read_ptr/write_ptr's degree will be 3 which is too high. - // Instead, the solution without using additional columns is to get two different shift amounts from core chip + // Note: shift_amount is expected to have degree 2, thus we can't put it in the select + // clause since the resulting read_ptr/write_ptr's degree will be 3 which is + // too high. 
Instead, the solution without using additional columns is to get + // two different shift amounts from core chip let read_ptr = select::(is_load.clone(), mem_ptr.clone(), local_cols.rd_rs2_ptr) - load_shift_amount; diff --git a/extensions/rv32im/circuit/src/adapters/mod.rs b/extensions/rv32im/circuit/src/adapters/mod.rs index 8f2dbb73b7..ab15671b74 100644 --- a/extensions/rv32im/circuit/src/adapters/mod.rs +++ b/extensions/rv32im/circuit/src/adapters/mod.rs @@ -29,8 +29,8 @@ pub const RV_B_TYPE_IMM_BITS: usize = 13; pub const RV_J_TYPE_IMM_BITS: usize = 21; -/// Convert the RISC-V register data (32 bits represented as 4 bytes, where each byte is represented as a field element) -/// back into its value as u32. +/// Convert the RISC-V register data (32 bits represented as 4 bytes, where each byte is represented +/// as a field element) back into its value as u32. pub fn compose(ptr_data: [F; RV32_REGISTER_NUM_LIMBS]) -> u32 { let mut val = 0; for (i, limb) in ptr_data.map(|x| x.as_canonical_u32()).iter().enumerate() { @@ -60,7 +60,8 @@ pub fn read_rv32_register( (record.0, val) } -/// Peeks at the value of a register without updating the memory state or incrementing the timestamp. +/// Peeks at the value of a register without updating the memory state or incrementing the +/// timestamp. 
pub fn unsafe_read_rv32_register(memory: &MemoryController, pointer: F) -> u32 { let data = memory.unsafe_read::(F::ONE, pointer); compose(data) diff --git a/extensions/rv32im/circuit/src/auipc/core.rs b/extensions/rv32im/circuit/src/auipc/core.rs index 4eb6da3c3e..0426e7a205 100644 --- a/extensions/rv32im/circuit/src/auipc/core.rs +++ b/extensions/rv32im/circuit/src/auipc/core.rs @@ -105,8 +105,9 @@ where let carry_divide = AB::F::from_canonical_usize(1 << RV32_CELL_BITS).inverse(); // Don't need to constrain the least significant limb of the addition - // since we already know that rd_data[0] = pc_limbs[0] and the least significant limb of imm is 0 - // Note: imm_limbs doesn't include the least significant limb so imm_limbs[i - 1] means the i-th limb of imm + // since we already know that rd_data[0] = pc_limbs[0] and the least significant limb of imm + // is 0 Note: imm_limbs doesn't include the least significant limb so imm_limbs[i - + // 1] means the i-th limb of imm for i in 1..RV32_REGISTER_NUM_LIMBS { carry[i] = AB::Expr::from(carry_divide) * (pc_limbs[i].clone() + imm_limbs[i - 1] - rd_data[i] + carry[i - 1].clone()); @@ -120,10 +121,11 @@ where .eval(builder, is_valid); } - // The immediate and PC limbs need range checking to ensure they're within [0, 2^RV32_CELL_BITS) - // Since we range check two items at a time, doing this way helps efficiently divide the limbs into groups of 2 - // Note: range checking the limbs of immediate and PC separately would result in additional range checks - // since they both have odd number of limbs that need to be range checked + // The immediate and PC limbs need range checking to ensure they're within [0, + // 2^RV32_CELL_BITS) Since we range check two items at a time, doing this way helps + // efficiently divide the limbs into groups of 2 Note: range checking the limbs of + // immediate and PC separately would result in additional range checks since + // they both have odd number of limbs that need to be range checked 
let mut need_range_check: Vec = Vec::new(); for limb in imm_limbs { need_range_check.push(limb.into()); @@ -132,7 +134,8 @@ where // pc_limbs[0] is already range checked through rd_data[0] for (i, limb) in pc_limbs.iter().skip(1).enumerate() { if i == pc_limbs.len() - 1 { - // Range check the most significant limb of pc to be in [0, 2^{PC_BITS-(RV32_REGISTER_NUM_LIMBS-1)*RV32_CELL_BITS}) + // Range check the most significant limb of pc to be in [0, + // 2^{PC_BITS-(RV32_REGISTER_NUM_LIMBS-1)*RV32_CELL_BITS}) need_range_check.push( (*limb).clone() * AB::Expr::from_canonical_usize( diff --git a/extensions/rv32im/circuit/src/branch_eq/core.rs b/extensions/rv32im/circuit/src/branch_eq/core.rs index 3feac9acde..bb04d86ee5 100644 --- a/extensions/rv32im/circuit/src/branch_eq/core.rs +++ b/extensions/rv32im/circuit/src/branch_eq/core.rs @@ -88,7 +88,8 @@ where // For BEQ, inv_marker is used to check equality of a and b: // - If a == b, all inv_marker values must be 0 (sum = 0) - // - If a != b, inv_marker contains 0s for all positions except ONE position i where a[i] != b[i] + // - If a != b, inv_marker contains 0s for all positions except ONE position i where a[i] != + // b[i] // - At this position, inv_marker[i] contains the multiplicative inverse of (a[i] - b[i]) // - This ensures inv_marker[i] * (a[i] - b[i]) = 1, making the sum = 1 // Note: There might be multiple valid inv_marker if a != b. diff --git a/extensions/rv32im/circuit/src/branch_lt/core.rs b/extensions/rv32im/circuit/src/branch_lt/core.rs index 20445a5669..3eebb02146 100644 --- a/extensions/rv32im/circuit/src/branch_lt/core.rs +++ b/extensions/rv32im/circuit/src/branch_lt/core.rs @@ -114,7 +114,8 @@ where let marker = &cols.diff_marker; let mut prefix_sum = AB::Expr::ZERO; - // Check if a_msb_f and b_msb_f are signed values of a[NUM_LIMBS - 1] and b[NUM_LIMBS - 1] in prime field F. + // Check if a_msb_f and b_msb_f are signed values of a[NUM_LIMBS - 1] and b[NUM_LIMBS - 1] + // in prime field F. 
let a_diff = a[NUM_LIMBS - 1] - cols.a_msb_f; let b_diff = b[NUM_LIMBS - 1] - cols.b_msb_f; builder @@ -133,10 +134,10 @@ where builder.assert_zero(not::(prefix_sum.clone()) * diff.clone()); builder.when(marker[i]).assert_eq(cols.diff_val, diff); } - // - If x != y, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where diff != 0. - // Constrains that diff == diff_val where diff_val is non-zero. - // - If x == y, then prefix_sum = 0 and cmp_lt = 0. - // Here, prefix_sum cannot be 1 because all diff are zero, making diff == diff_val fails. + // - If x != y, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where + // diff != 0. Constrains that diff == diff_val where diff_val is non-zero. + // - If x == y, then prefix_sum = 0 and cmp_lt = 0. Here, prefix_sum cannot be 1 because all + // diff are zero, making diff == diff_val fails. builder.assert_bool(prefix_sum.clone()); builder diff --git a/extensions/rv32im/circuit/src/divrem/core.rs b/extensions/rv32im/circuit/src/divrem/core.rs index 1e65a84385..bad043d582 100644 --- a/extensions/rv32im/circuit/src/divrem/core.rs +++ b/extensions/rv32im/circuit/src/divrem/core.rs @@ -120,7 +120,8 @@ where let q = &cols.q; let r = &cols.r; - // Constrain that b = (c * q + r) % 2^{NUM_LIMBS * LIMB_BITS} and range checkeach element in q. + // Constrain that b = (c * q + r) % 2^{NUM_LIMBS * LIMB_BITS} and range checkeach element in + // q. let b_ext = cols.b_sign * AB::F::from_canonical_u32((1 << LIMB_BITS) - 1); let c_ext = cols.c_sign * AB::F::from_canonical_u32((1 << LIMB_BITS) - 1); let carry_divide = AB::F::from_canonical_u32(1 << LIMB_BITS).inverse(); @@ -302,10 +303,10 @@ where builder.assert_zero(not::(prefix_sum.clone()) * diff.clone()); builder.when(marker[i]).assert_eq(cols.lt_diff, diff); } - // - If r_prime != c, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where diff != 0. - // Constrains that diff == lt_diff where lt_diff is non-zero. 
- // - If r_prime == c, then prefix_sum = 0. - // Here, prefix_sum cannot be 1 because all diff are zero, making diff == lt_diff fails. + // - If r_prime != c, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index + // where diff != 0. Constrains that diff == lt_diff where lt_diff is non-zero. + // - If r_prime == c, then prefix_sum = 0. Here, prefix_sum cannot be 1 because all diff are + // zero, making diff == lt_diff fails. builder.when(is_valid.clone()).assert_one(prefix_sum); // Range check to ensure lt_diff is non-zero. diff --git a/extensions/rv32im/circuit/src/extension.rs b/extensions/rv32im/circuit/src/extension.rs index 9c6063468a..f1f67d3994 100644 --- a/extensions/rv32im/circuit/src/extension.rs +++ b/extensions/rv32im/circuit/src/extension.rs @@ -1,8 +1,7 @@ use derive_more::derive::From; use openvm_circuit::{ arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, SystemPort, VmChipComplex, VmConfig, - VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, + SystemConfig, SystemPort, VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError, }, system::phantom::PhantomChip, }; diff --git a/extensions/rv32im/circuit/src/hintstore/mod.rs b/extensions/rv32im/circuit/src/hintstore/mod.rs index 56879869c0..6f70a584d0 100644 --- a/extensions/rv32im/circuit/src/hintstore/mod.rs +++ b/extensions/rv32im/circuit/src/hintstore/mod.rs @@ -204,9 +204,9 @@ impl Air for Rv32HintStoreAir { .eval(builder, is_start.clone()); // Preventing mem_ptr and rem_words overflow - // Constraining mem_ptr_limbs[RV32_REGISTER_NUM_LIMBS - 1] < 2^(pointer_max_bits - (RV32_REGISTER_NUM_LIMBS - 1)*RV32_CELL_BITS) - // which implies mem_ptr <= 2^pointer_max_bits - // Similarly for rem_words <= 2^pointer_max_bits + // Constraining mem_ptr_limbs[RV32_REGISTER_NUM_LIMBS - 1] < 2^(pointer_max_bits - + // (RV32_REGISTER_NUM_LIMBS - 1)*RV32_CELL_BITS) which implies mem_ptr <= + // 2^pointer_max_bits Similarly for rem_words <= 2^pointer_max_bits 
self.bitwise_operation_lookup_bus .send_range( local_cols.mem_ptr_limbs[RV32_REGISTER_NUM_LIMBS - 1] @@ -230,25 +230,29 @@ impl Air for Rv32HintStoreAir { // buffer transition // `is_end` implies that the next row belongs to a new instruction, // which could be one of empty, hint_single, or hint_buffer - // Constrains that when the current row is not empty and `is_end == 1`, then `rem_words` is 1 + // Constrains that when the current row is not empty and `is_end == 1`, then `rem_words` is + // 1 builder .when(is_valid) .when(is_end.clone()) .assert_one(rem_words.clone()); let mut when_buffer_transition = builder.when(not::(is_end.clone())); - // Notes on `rem_words`: we constrain that `rem_words` doesn't overflow when we first read it and - // that on each row it decreases by one (below). We also constrain that when the current instruction ends then `rem_words` is 1. - // However, we don't constrain that when `rem_words` is 1 then we have to end the current instruction. - // The only way to exploit this if we to do some multiple of `p` number of additional illegal `buffer` rows where `p` is the modulus of `F`. - // However, when doing `p` additional `buffer` rows we will always increment `mem_ptr` to an illegal memory address at some point, - // which prevents this exploit. + // Notes on `rem_words`: we constrain that `rem_words` doesn't overflow when we first read + // it and that on each row it decreases by one (below). We also constrain that when + // the current instruction ends then `rem_words` is 1. However, we don't constrain + // that when `rem_words` is 1 then we have to end the current instruction. + // The only way to exploit this if we to do some multiple of `p` number of additional + // illegal `buffer` rows where `p` is the modulus of `F`. However, when doing `p` + // additional `buffer` rows we will always increment `mem_ptr` to an illegal memory address + // at some point, which prevents this exploit. 
when_buffer_transition.assert_one(rem_words.clone() - next_rem_words.clone()); - // Note: we only care about the `next_mem_ptr = compose(next_mem_ptr_limb)` and not the individual limbs: - // the limbs do not need to be in the range, they can be anything to make `next_mem_ptr` correct -- - // this is just a way to not have to have another column for `mem_ptr`. - // The constraint we care about is `next.mem_ptr == local.mem_ptr + 4`. - // Finally, since we increment by `4` each time, any out of bounds memory access will be rejected by the memory bus before we overflow the field. + // Note: we only care about the `next_mem_ptr = compose(next_mem_ptr_limb)` and not the + // individual limbs: the limbs do not need to be in the range, they can be anything + // to make `next_mem_ptr` correct -- this is just a way to not have to have another + // column for `mem_ptr`. The constraint we care about is `next.mem_ptr == + // local.mem_ptr + 4`. Finally, since we increment by `4` each time, any out of + // bounds memory access will be rejected by the memory bus before we overflow the field. 
when_buffer_transition.assert_eq( next_mem_ptr.clone() - mem_ptr.clone(), AB::F::from_canonical_usize(RV32_REGISTER_NUM_LIMBS), diff --git a/extensions/rv32im/circuit/src/jal_lui/core.rs b/extensions/rv32im/circuit/src/jal_lui/core.rs index 48863ff926..2ba10e615e 100644 --- a/extensions/rv32im/circuit/src/jal_lui/core.rs +++ b/extensions/rv32im/circuit/src/jal_lui/core.rs @@ -107,7 +107,8 @@ where ); let intermed_val = rd[0] + intermed_val * AB::Expr::from_canonical_u32(1 << RV32_CELL_BITS); - // Constrain that from_pc + DEFAULT_PC_STEP is the correct composition of intermed_val in case of JAL + // Constrain that from_pc + DEFAULT_PC_STEP is the correct composition of intermed_val in + // case of JAL builder.when(is_jal).assert_eq( intermed_val, from_pc + AB::F::from_canonical_u32(DEFAULT_PC_STEP), diff --git a/extensions/rv32im/circuit/src/jalr/core.rs b/extensions/rv32im/circuit/src/jalr/core.rs index ea933b90eb..fd89c1e317 100644 --- a/extensions/rv32im/circuit/src/jalr/core.rs +++ b/extensions/rv32im/circuit/src/jalr/core.rs @@ -109,10 +109,11 @@ where let least_sig_limb = from_pc + AB::F::from_canonical_u32(DEFAULT_PC_STEP) - composed; // rd_data is the final decomposition of `from_pc + DEFAULT_PC_STEP` we need. - // The range check on `least_sig_limb` also ensures that `rd_data` correctly represents `from_pc + DEFAULT_PC_STEP`. - // Specifically, if `rd_data` does not match the expected limb, then `least_sig_limb` becomes - // the real `least_sig_limb` plus the difference between `composed` and the three most significant limbs of `from_pc + DEFAULT_PC_STEP`. - // In that case, `least_sig_limb` >= 2^RV32_CELL_BITS. + // The range check on `least_sig_limb` also ensures that `rd_data` correctly represents + // `from_pc + DEFAULT_PC_STEP`. 
Specifically, if `rd_data` does not match the + // expected limb, then `least_sig_limb` becomes the real `least_sig_limb` plus the + // difference between `composed` and the three most significant limbs of `from_pc + + // DEFAULT_PC_STEP`. In that case, `least_sig_limb` >= 2^RV32_CELL_BITS. let rd_data = array::from_fn(|i| { if i == 0 { least_sig_limb.clone() @@ -135,8 +136,8 @@ where builder.assert_bool(imm_sign); - // Constrain to_pc_least_sig_bit + 2 * to_pc_limbs = rs1 + imm as a i32 addition with 2 limbs - // RISC-V spec explicitly sets the least significant bit of `to_pc` to 0 + // Constrain to_pc_least_sig_bit + 2 * to_pc_limbs = rs1 + imm as an i32 addition with 2 + // limbs. RISC-V spec explicitly sets the least significant bit of `to_pc` to 0 let rs1_limbs_01 = rs1[0] + rs1[1] * AB::F::from_canonical_u32(1 << RV32_CELL_BITS); let rs1_limbs_23 = rs1[2] + rs1[3] * AB::F::from_canonical_u32(1 << RV32_CELL_BITS); let inv = AB::F::from_canonical_u32(1 << 16).inverse(); diff --git a/extensions/rv32im/circuit/src/less_than/core.rs b/extensions/rv32im/circuit/src/less_than/core.rs index 2e6e5cdcd0..a605dc43de 100644 --- a/extensions/rv32im/circuit/src/less_than/core.rs +++ b/extensions/rv32im/circuit/src/less_than/core.rs @@ -111,10 +111,10 @@ where builder.assert_zero(not::(prefix_sum.clone()) * diff.clone()); builder.when(marker[i]).assert_eq(cols.diff_val, diff); } - // - If x != y, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where diff != 0. - // Constrains that diff == diff_val where diff_val is non-zero. - // - If x == y, then prefix_sum = 0 and cmp_result = 0. - // Here, prefix_sum cannot be 1 because all diff are zero, making diff == diff_val fails. + // - If x != y, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where + // diff != 0. Constrains that diff == diff_val where diff_val is non-zero. + // - If x == y, then prefix_sum = 0 and cmp_result = 0. 
Here, prefix_sum cannot be 1 because + // all diff are zero, making diff == diff_val fails. builder.assert_bool(prefix_sum.clone()); builder diff --git a/extensions/rv32im/circuit/src/load_sign_extend/core.rs b/extensions/rv32im/circuit/src/load_sign_extend/core.rs index ff8b391590..2284d6815c 100644 --- a/extensions/rv32im/circuit/src/load_sign_extend/core.rs +++ b/extensions/rv32im/circuit/src/load_sign_extend/core.rs @@ -26,9 +26,10 @@ use crate::adapters::LoadStoreInstruction; /// LoadSignExtend Core Chip handles byte/halfword into word conversions through sign extend /// This chip uses read_data to construct write_data -/// prev_data columns are not used in constraints defined in the CoreAir, but are used in constraints by the Adapter -/// shifted_read_data is the read_data shifted by (shift_amount & 2), this reduces the number of opcode flags needed -/// using this shifted data we can generate the write_data as if the shift_amount was 0 for loadh and 0 or 1 for loadb +/// prev_data columns are not used in constraints defined in the CoreAir, but are used in +/// constraints by the Adapter shifted_read_data is the read_data shifted by (shift_amount & 2), +/// this reduces the number of opcode flags needed using this shifted data we can generate the +/// write_data as if the shift_amount was 0 for loadh and 0 or 1 for loadb #[repr(C)] #[derive(Debug, Clone, AlignedBorrow)] pub struct LoadSignExtendCoreCols { diff --git a/extensions/rv32im/circuit/src/mul/core.rs b/extensions/rv32im/circuit/src/mul/core.rs index de2711786f..fa65a6cf09 100644 --- a/extensions/rv32im/circuit/src/mul/core.rs +++ b/extensions/rv32im/circuit/src/mul/core.rs @@ -70,7 +70,8 @@ where let c = &cols.c; // Define carry[i] = (sum_{k=0}^{i} b[k] * c[i - k] + carry[i - 1] - a[i]) / 2^LIMB_BITS. - // If 0 <= a[i], carry[i] < 2^LIMB_BITS, it can be proven that a[i] = sum_{k=0}^{i} (b[k] * c[i - k]) % 2^LIMB_BITS as necessary. 
+ // If 0 <= a[i], carry[i] < 2^LIMB_BITS, it can be proven that a[i] = sum_{k=0}^{i} (b[k] * + // c[i - k]) % 2^LIMB_BITS as necessary. let mut carry: [AB::Expr; NUM_LIMBS] = array::from_fn(|_| AB::Expr::ZERO); let carry_divide = AB::F::from_canonical_u32(1 << LIMB_BITS).inverse(); diff --git a/extensions/rv32im/transpiler/src/instructions.rs b/extensions/rv32im/transpiler/src/instructions.rs index 8d0ed0d73f..0cd013a8ba 100644 --- a/extensions/rv32im/transpiler/src/instructions.rs +++ b/extensions/rv32im/transpiler/src/instructions.rs @@ -272,7 +272,8 @@ pub enum Rv32HintStoreOpcode { #[derive(Copy, Clone, Debug, PartialEq, Eq, FromRepr)] #[repr(u16)] pub enum Rv32Phantom { - /// Prepare the next input vector for hinting, but prepend it with a 4-byte decomposition of its length instead of one field element. + /// Prepare the next input vector for hinting, but prepend it with a 4-byte decomposition of + /// its length instead of one field element. HintInput = 0x20, /// Peek string from memory and print it to stdout. PrintStr, diff --git a/extensions/rv32im/transpiler/src/lib.rs b/extensions/rv32im/transpiler/src/lib.rs index 8cf4e5d02e..445ef9f43e 100644 --- a/extensions/rv32im/transpiler/src/lib.rs +++ b/extensions/rv32im/transpiler/src/lib.rs @@ -49,7 +49,8 @@ impl TranspilerExtension for Rv32ITranspilerExtension { if dec_insn.funct3 as u8 == CSRRW_FUNCT3 { // CSRRW if dec_insn.rs1 == 0 && dec_insn.rd == 0 { - // This resets the CSR counter to zero. Since we don't have any CSR registers, this is a nop. + // This resets the CSR counter to zero. Since we don't have any CSR + // registers, this is a nop. 
return Some(TranspilerOutput::one_to_one(nop())); } } diff --git a/extensions/sha256/circuit/Cargo.toml b/extensions/sha256/circuit/Cargo.toml index 0c7100e99b..95c87b0871 100644 --- a/extensions/sha256/circuit/Cargo.toml +++ b/extensions/sha256/circuit/Cargo.toml @@ -29,7 +29,7 @@ openvm-stark-sdk = { workspace = true } openvm-circuit = { workspace = true, features = ["test-utils"] } [features] -default = ["parallel", "mimalloc"] +default = ["parallel", "jemalloc"] parallel = ["openvm-circuit/parallel"] test-utils = ["openvm-circuit/test-utils"] # performance features: diff --git a/extensions/sha256/circuit/src/extension.rs b/extensions/sha256/circuit/src/extension.rs index 49cc57236c..76a6c1ec0c 100644 --- a/extensions/sha256/circuit/src/extension.rs +++ b/extensions/sha256/circuit/src/extension.rs @@ -1,9 +1,6 @@ use derive_more::derive::From; use openvm_circuit::{ - arch::{ - SystemConfig, SystemExecutor, SystemPeriphery, VmChipComplex, VmConfig, VmExtension, - VmInventory, VmInventoryBuilder, VmInventoryError, - }, + arch::{SystemConfig, VmExtension, VmInventory, VmInventoryBuilder, VmInventoryError}, system::phantom::PhantomChip, }; use openvm_circuit_derive::{AnyEnum, InstructionExecutor, VmConfig}; diff --git a/extensions/sha256/circuit/src/sha256_chip/air.rs b/extensions/sha256/circuit/src/sha256_chip/air.rs index 0487314ea0..286df07697 100644 --- a/extensions/sha256/circuit/src/sha256_chip/air.rs +++ b/extensions/sha256/circuit/src/sha256_chip/air.rs @@ -86,10 +86,10 @@ pub(super) enum PaddingFlags { FirstPadding13, FirstPadding14, FirstPadding15, - /// FIRST_PADDING_i_LastRow: it is the first row with padding and there are i cells of non-padding - /// AND it is the last reading row of the message - /// NOTE: if the Last row has padding it has to be at least 9 cells since the last 8 cells are padded with - /// the message length + /// FIRST_PADDING_i_LastRow: it is the first row with padding and there are i cells of + /// non-padding AND it is the last 
reading row of the message + /// NOTE: if the Last row has padding it has to be at least 9 cells since the last 8 cells are + /// padded with the message length FirstPadding0_LastRow, FirstPadding1_LastRow, FirstPadding2_LastRow, @@ -140,8 +140,9 @@ impl Sha256VmAir { ) { let next_is_last_row = next.inner.flags.is_digest_row * next.inner.flags.is_last_block; - // Constrain that `padding_occured` is 1 on a suffix of rows in each message, excluding the last - // digest row, and 0 everywhere else. Furthermore, the suffix starts in the first 4 rows of some block. + // Constrain that `padding_occured` is 1 on a suffix of rows in each message, excluding the + // last digest row, and 0 everywhere else. Furthermore, the suffix starts in the + // first 4 rows of some block. builder.assert_bool(local.control.padding_occurred); // Last round row in the last block has padding_occurred = 1 @@ -161,8 +162,9 @@ impl Sha256VmAir { .when(local.control.padding_occurred - next_is_last_row.clone()) .assert_one(next.control.padding_occurred); - // If next row is not first 4 rows of a block, then next.padding_occurred = local.padding_occurred. - // So padding_occurred only changes in the first 4 rows of a block. + // If next row is not first 4 rows of a block, then next.padding_occurred = + // local.padding_occurred. So padding_occurred only changes in the first 4 rows of a + // block. 
builder .when_transition() .when(not(next.inner.flags.is_first_4_rows) - next_is_last_row) @@ -252,7 +254,8 @@ impl Sha256VmAir { is_next_entire_padding, ); - // `pad_flags` is `FirstPadding*` if current row is padding and the previous row is not padding + // `pad_flags` is `FirstPadding*` if current row is padding and the previous row is not + // padding builder.when(next.inner.flags.is_first_4_rows).assert_eq( not(local.control.padding_occurred) * next.control.padding_occurred, is_next_first_padding, @@ -396,8 +399,8 @@ impl Sha256VmAir { + local.inner.message_schedule.w[3][2], ); - // We can't support messages longer than 2^30 bytes because the length has to fit in a field element. - // So, constrain that the first 4 bytes of the length are 0. + // We can't support messages longer than 2^30 bytes because the length has to fit in a field + // element. So, constrain that the first 4 bytes of the length are 0. // Thus, the bit-length is < 2^32 so the message is < 2^29 bytes. for i in 8..12 { builder @@ -421,7 +424,8 @@ impl Sha256VmAir { .when(not::(is_last_row.clone())) .assert_eq(next_cols.control.len, local_cols.control.len); - // Read ptr should increment by [SHA256_READ_SIZE] for the first 4 rows and stay the same otherwise + // Read ptr should increment by [SHA256_READ_SIZE] for the first 4 rows and stay the same + // otherwise let read_ptr_delta = local_cols.inner.flags.is_first_4_rows * AB::Expr::from_canonical_usize(SHA256_READ_SIZE); builder @@ -527,7 +531,8 @@ impl Sha256VmAir { // This only works if self.ptr_max_bits >= 24 which is typically the case self.bitwise_lookup_bus .send_range( - // It is fine to shift like this since we already know that dst_ptr and src_ptr have [RV32_CELL_BITS] bits + // It is fine to shift like this since we already know that dst_ptr and src_ptr + // have [RV32_CELL_BITS] bits local_cols.dst_ptr[RV32_REGISTER_NUM_LIMBS - 1] * shift.clone(), local_cols.src_ptr[RV32_REGISTER_NUM_LIMBS - 1] * shift.clone(), ) @@ -548,8 +553,9 
@@ impl Sha256VmAir { let dst_ptr_val = compose::(&local_cols.dst_ptr.map(|x| x.into()), RV32_CELL_BITS); - // Note: revisit in the future to do 2 block writes of 16 cells instead of 1 block write of 32 cells - // This could be beneficial as the output is often an input for another hash + // Note: revisit in the future to do 2 block writes of 16 cells instead of 1 block write of + // 32 cells. This could be beneficial as the output is often an input for + // another hash self.memory_bridge .write( MemoryAddress::new(AB::Expr::from_canonical_u32(RV32_MEMORY_AS), dst_ptr_val), diff --git a/extensions/sha256/circuit/src/sha256_chip/columns.rs b/extensions/sha256/circuit/src/sha256_chip/columns.rs index a24014e51c..38c13a0f73 100644 --- a/extensions/sha256/circuit/src/sha256_chip/columns.rs +++ b/extensions/sha256/circuit/src/sha256_chip/columns.rs @@ -10,7 +10,8 @@ use openvm_sha256_air::{Sha256DigestCols, Sha256RoundCols}; use super::{SHA256_REGISTER_READS, SHA256_WRITE_SIZE}; -/// the first 16 rows of every SHA256 block will be of type Sha256VmRoundCols and the last row will be of type Sha256VmDigestCols +/// the first 16 rows of every SHA256 block will be of type Sha256VmRoundCols and the last row will +/// be of type Sha256VmDigestCols #[repr(C)] #[derive(Clone, Copy, Debug, AlignedBorrow)] pub struct Sha256VmRoundCols { @@ -26,7 +27,8 @@ pub struct Sha256VmDigestCols { pub inner: Sha256DigestCols, pub from_state: ExecutionState, - /// It is counter intuitive, but we will constrain the register reads on the very last row of every message + /// It is counter intuitive, but we will constrain the register reads on the very last row of + /// every message pub rd_ptr: T, pub rs1_ptr: T, pub rs2_ptr: T, @@ -47,7 +49,8 @@ pub struct Sha256VmControlCols { /// Need to keep timestamp and read_ptr since block reads don't have the necessary information pub cur_timestamp: T, pub read_ptr: T, - /// Padding flags which will be used to encode the the number of non-padding cells in 
the current row + /// Padding flags which will be used to encode the number of non-padding cells in the + /// current row pub pad_flags: [T; 6], /// A boolean flag that indicates whether a padding already occurred pub padding_occurred: T, diff --git a/extensions/sha256/circuit/src/sha256_chip/mod.rs b/extensions/sha256/circuit/src/sha256_chip/mod.rs index 5b9bf7bb46..4c40eca5d8 100644 --- a/extensions/sha256/circuit/src/sha256_chip/mod.rs +++ b/extensions/sha256/circuit/src/sha256_chip/mod.rs @@ -132,7 +132,8 @@ impl InstructionExecutor for Sha256VmChip { assert!(len < (1 << self.air.ptr_max_bits)); } - // need to pad with one 1 bit, 64 bits for the message length and then pad until the length is divisible by [SHA256_BLOCK_BITS] + // need to pad with one 1 bit, 64 bits for the message length and then pad until the length + // is divisible by [SHA256_BLOCK_BITS] let num_blocks = ((len << 3) as usize + 1 + 64).div_ceil(SHA256_BLOCK_BITS); // we will read [num_blocks] * [SHA256_BLOCK_CELLS] cells but only [len] cells will be used diff --git a/extensions/sha256/circuit/src/sha256_chip/trace.rs b/extensions/sha256/circuit/src/sha256_chip/trace.rs index 0d51a21368..71468f4b1c 100644 --- a/extensions/sha256/circuit/src/sha256_chip/trace.rs +++ b/extensions/sha256/circuit/src/sha256_chip/trace.rs @@ -94,7 +94,8 @@ where ); // During the first pass we will fill out most of the matrix - // But there are some cells that can't be generated by the first pass so we will do a second pass over the matrix + // But there are some cells that can't be generated by the first pass so we will do a second + // pass over the matrix values .par_chunks_mut(width * SHA256_ROWS_PER_BLOCK) .zip(states.into_par_iter().enumerate()) diff --git a/rustfmt.toml b/rustfmt.toml index 7f1b494d59..7d773d6025 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,3 +2,8 @@ style_edition = "2021" imports_granularity = "Crate" # unstable features: use cargo +nightly fmt group_imports = "StdExternalCrate" 
+comment_width = 100 +wrap_comments = true +format_code_in_doc_comments = true +doc_comment_code_block_width = 100 +use_field_init_shorthand = true