From f2c9664ca4aec69d4028e20bec50962208663b34 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 23 Dec 2025 15:19:15 +0000 Subject: [PATCH 01/46] benchcoin: add tooling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds build configuration, benchmarking CI workflows, Python dependencies, plotting tools, and documentation for benchcoin. Co-authored-by: David Gumberg Co-authored-by: Lőrinc --- .github/README.md | 1 + .github/workflows/benchmark.yml | 112 ++++ .github/workflows/nightly-benchmark.yml | 179 +++++++ .github/workflows/publish-results.yml | 146 ++++++ .github/workflows/rebase.yml | 35 ++ bench.py | 655 +++++++++++++++++++++++ bench/README.md | 234 +++++++++ bench/__init__.py | 3 + bench/analyze.py | 538 +++++++++++++++++++ bench/benchmark.py | 362 +++++++++++++ bench/benchmark_config.py | 249 +++++++++ bench/build.py | 197 +++++++ bench/capabilities.py | 117 +++++ bench/compare.py | 180 +++++++ bench/config.py | 230 +++++++++ bench/configs/nightly.toml | 24 + bench/configs/pr.toml | 29 ++ bench/configs/test-signet.toml | 21 + bench/machine.py | 175 +++++++ bench/nightly.py | 456 ++++++++++++++++ bench/patchelf.py | 135 +++++ bench/report.py | 659 ++++++++++++++++++++++++ bench/utils.py | 105 ++++ doc/benchcoin.md | 181 +++++++ doc/flamegraph.svg | 491 ++++++++++++++++++ flake.lock | 27 + flake.nix | 170 ++++++ justfile | 115 +++++ 28 files changed, 5826 insertions(+) create mode 120000 .github/README.md create mode 100644 .github/workflows/benchmark.yml create mode 100644 .github/workflows/nightly-benchmark.yml create mode 100644 .github/workflows/publish-results.yml create mode 100644 .github/workflows/rebase.yml create mode 100755 bench.py create mode 100644 bench/README.md create mode 100644 bench/__init__.py create mode 100644 bench/analyze.py create mode 100644 bench/benchmark.py create mode 100644 bench/benchmark_config.py create mode 100644 bench/build.py create mode 100644 bench/capabilities.py create 
mode 100644 bench/compare.py create mode 100644 bench/config.py create mode 100644 bench/configs/nightly.toml create mode 100644 bench/configs/pr.toml create mode 100644 bench/configs/test-signet.toml create mode 100644 bench/machine.py create mode 100644 bench/nightly.py create mode 100644 bench/patchelf.py create mode 100644 bench/report.py create mode 100644 bench/utils.py create mode 100644 doc/benchcoin.md create mode 100644 doc/flamegraph.svg create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 justfile diff --git a/.github/README.md b/.github/README.md new file mode 120000 index 000000000000..e5c578ba74b5 --- /dev/null +++ b/.github/README.md @@ -0,0 +1 @@ +../doc/benchcoin.md \ No newline at end of file diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 000000000000..d276bf2e59b8 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,112 @@ +name: Benchmark +on: + pull_request: + branches: + - master + +jobs: + build-binaries: + runs-on: [self-hosted, linux, x64] + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Fetch base commit + run: | + echo "HEAD_SHA=$(git rev-parse HEAD)" >> "$GITHUB_ENV" + git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }} + + - name: Build both binaries + run: | + nix develop --command python3 bench.py build \ + -o ${{ runner.temp }}/binaries \ + $BASE_SHA:base $HEAD_SHA:head + + - name: Upload binaries + uses: actions/upload-artifact@v4 + with: + name: bitcoind-binaries + path: ${{ runner.temp }}/binaries/ + + benchmark: + needs: build-binaries + strategy: + matrix: + # Matrix entries from configs/pr.toml: dbcache=[450,32000] x instrumented=[false,true] + name: [450-false, 450-true, 32000-false, 32000-true] + runs-on: [self-hosted, linux, x64] + timeout-minutes: 600 + env: + ORIGINAL_DATADIR: /data/pruned-840k + 
BASE_SHA: ${{ github.event.pull_request.base.sha }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Download binaries + uses: actions/download-artifact@v4 + with: + name: bitcoind-binaries + path: ${{ runner.temp }}/binaries + + - name: Set binary permissions + run: | + chmod +x ${{ runner.temp }}/binaries/base/bitcoind + chmod +x ${{ runner.temp }}/binaries/head/bitcoind + + - name: Fetch base commit + run: | + echo "HEAD_SHA=$(git rev-parse HEAD)" >> "$GITHUB_ENV" + git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }} + + - name: Run benchmark + run: | + nix develop --command python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry ${{ matrix.name }} \ + --datadir $ORIGINAL_DATADIR \ + --tmp-datadir ${{ runner.temp }}/datadir \ + --output-dir ${{ runner.temp }}/output \ + base:${{ runner.temp }}/binaries/base/bitcoind \ + head:${{ runner.temp }}/binaries/head/bitcoind + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: result-${{ matrix.name }} + path: ${{ runner.temp }}/output/results.json + + - name: Upload plots + uses: actions/upload-artifact@v4 + with: + name: pngs-${{ matrix.name }} + path: ${{ runner.temp }}/output/plots/*.png + if-no-files-found: ignore + + - name: Upload flamegraphs + uses: actions/upload-artifact@v4 + with: + name: flamegraph-${{ matrix.name }} + path: ${{ runner.temp }}/output/*-flamegraph.svg + if-no-files-found: ignore + + - name: Write context metadata + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + RUNNER_CONTEXT: ${{ toJSON(runner) }} + run: | + mkdir -p ${{ runner.temp }}/contexts + echo "$GITHUB_CONTEXT" | nix develop --command jq "del(.token)" > ${{ runner.temp }}/contexts/github.json + echo "$RUNNER_CONTEXT" > ${{ runner.temp }}/contexts/runner.json + + - name: Upload context metadata + uses: actions/upload-artifact@v4 + with: + name: run-metadata-${{ matrix.name }} + path: ${{ runner.temp }}/contexts/ diff 
--git a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml new file mode 100644 index 000000000000..511702d40a9a --- /dev/null +++ b/.github/workflows/nightly-benchmark.yml @@ -0,0 +1,179 @@ +name: Nightly Benchmark +on: + workflow_run: + workflows: ["Nightly Rebase"] + types: [completed] + workflow_dispatch: + +jobs: + build: + if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }} + runs-on: [self-hosted, linux, x64] + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 50 # Need history for merge-base + + - name: Get commit SHAs + run: | + # Benchcoin commit (for building) + echo "BENCHCOIN_SHA=$(git rev-parse HEAD)" >> "$GITHUB_ENV" + + # Bitcoin commit - find merge-base with upstream + git remote add upstream https://github.com/bitcoin/bitcoin.git + git fetch upstream master + BITCOIN_SHA=$(git merge-base HEAD upstream/master) + echo "BITCOIN_SHA=$BITCOIN_SHA" >> "$GITHUB_ENV" + + echo "Benchcoin: $(git rev-parse HEAD)" + echo "Bitcoin merge-base: $BITCOIN_SHA" + + - name: Build master binary + run: | + nix develop --command python3 bench.py build \ + -o ${{ runner.temp }}/binaries \ + $BENCHCOIN_SHA:master + + - name: Upload binaries + uses: actions/upload-artifact@v4 + with: + name: nightly-binaries + path: ${{ runner.temp }}/binaries/ + + - name: Upload commit info + run: | + echo "$BITCOIN_SHA" > ${{ runner.temp }}/commit.txt + - uses: actions/upload-artifact@v4 + with: + name: commit-info + path: ${{ runner.temp }}/commit.txt + + benchmark: + needs: build + strategy: + matrix: + # Matrix entries from configs/nightly.toml: dbcache=[450,32000] + name: ["450", "32000"] + runs-on: [self-hosted, linux, x64] + timeout-minutes: 600 + env: + ORIGINAL_DATADIR: /data/pruned-840k + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Download binaries + uses: actions/download-artifact@v4 + with: + name: 
nightly-binaries + path: ${{ runner.temp }}/binaries + + - name: Set binary permissions + run: | + chmod +x ${{ runner.temp }}/binaries/master/bitcoind + + - name: Run benchmark + run: | + nix develop --command python3 bench.py run \ + --benchmark-config bench/configs/nightly.toml \ + --matrix-entry ${{ matrix.name }} \ + --datadir $ORIGINAL_DATADIR \ + --tmp-datadir ${{ runner.temp }}/datadir \ + --output-dir ${{ runner.temp }}/output \ + master:${{ runner.temp }}/binaries/master/bitcoind + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: result-nightly-${{ matrix.name }} + path: ${{ runner.temp }}/output/results.json + + publish: + needs: benchmark + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout gh-pages + uses: actions/checkout@v4 + with: + ref: gh-pages + + - name: Checkout benchcoin tools + uses: actions/checkout@v4 + with: + ref: master + path: benchcoin-tools + + - name: Download commit info + uses: actions/download-artifact@v4 + with: + name: commit-info + path: ./commit-info + + - name: Download 450 results + uses: actions/download-artifact@v4 + with: + name: result-nightly-450 + path: ./nightly-450-results + + - name: Download 32000 results + uses: actions/download-artifact@v4 + with: + name: result-nightly-32000 + path: ./nightly-32000-results + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Get current date + run: | + echo "DATE=$(date -u +%Y-%m-%d)" >> "$GITHUB_ENV" + + - name: Append results to history + run: | + COMMIT=$(cat ./commit-info/commit.txt) + cd benchcoin-tools + + # Append 450 (default dbcache) result (with machine specs and config capture) + python3 bench.py nightly \ + --history-file ../nightly-history.json \ + append \ + ../nightly-450-results/results.json \ + "$COMMIT" \ + 450 \ + 450 \ + --date "$DATE" \ + --capture-machine \ + --benchmark-config bench/configs/nightly.toml + + # Append 32000 (large dbcache) result + 
python3 bench.py nightly \ + --history-file ../nightly-history.json \ + append \ + ../nightly-32000-results/results.json \ + "$COMMIT" \ + 32000 \ + 32000 \ + --date "$DATE" + + - name: Generate chart + run: | + cd benchcoin-tools + python3 bench.py nightly \ + --history-file ../nightly-history.json \ + chart \ + ../index.html + + - name: Commit and push to gh-pages + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "github-actions[bot]@users.noreply.github.com" + git add nightly-history.json index.html + git commit -m "Update nightly benchmark results for $DATE" || echo "No changes to commit" + git push origin gh-pages diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml new file mode 100644 index 000000000000..cde65f34a456 --- /dev/null +++ b/.github/workflows/publish-results.yml @@ -0,0 +1,146 @@ +name: Publish Results +on: + workflow_run: + workflows: ["Benchmark"] + types: [completed] +jobs: + build: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' }} + permissions: + actions: read + contents: write + checks: read + env: + # Matrix entries from configs/pr.toml: dbcache=[450,32000] x instrumented=[false,true] + NETWORKS: "450-true,32000-true,450-false,32000-false" + outputs: + speedups: ${{ steps.generate.outputs.speedups }} + pr-number: ${{ steps.metadata.outputs.pr-number }} + result-url: ${{ steps.generate.outputs.result-url }} + steps: + - uses: actions/checkout@v4 + with: + ref: gh-pages + + - name: Checkout benchcoin tools + uses: actions/checkout@v4 + with: + ref: master + path: benchcoin-tools + + - name: Download artifacts + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh run download ${{ github.event.workflow_run.id }} --repo ${{ github.repository }} + + - name: Extract artifacts + run: | + for network in ${NETWORKS//,/ }; do + # Create network-specific directories with results + if [ -d "result-${network}" ]; then + mkdir 
-p "${network}-results" + mv "result-${network}/results.json" "${network}-results/" + fi + + # Copy flamegraphs into network results directory + if [ -d "flamegraph-${network}" ]; then + cp -r "flamegraph-${network}"/* "${network}-results/" 2>/dev/null || true + fi + + # Copy plots into network results directory + if [ -d "pngs-${network}" ]; then + mkdir -p "${network}-results/plots" + cp -r "pngs-${network}"/* "${network}-results/plots/" 2>/dev/null || true + fi + + # Keep metadata separate for extraction + if [ -d "run-metadata-${network}" ]; then + mkdir -p "${network}-metadata" + mv "run-metadata-${network}"/* "${network}-metadata/" + fi + done + + - name: Extract metadata + id: metadata + run: | + # Find PR number and run ID from any available metadata + for network in ${NETWORKS//,/ }; do + if [ -f "${network}-metadata/github.json" ]; then + PR_NUMBER=$(jq -r '.event.pull_request.number // "main"' "${network}-metadata/github.json") + RUN_ID=$(jq -r '.run_id' "${network}-metadata/github.json") + echo "pr-number=${PR_NUMBER}" >> $GITHUB_OUTPUT + echo "run-id=${RUN_ID}" >> $GITHUB_OUTPUT + echo "Found metadata: PR=${PR_NUMBER}, Run=${RUN_ID}" + break + fi + done + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Generate report + id: generate + env: + PR_NUMBER: ${{ steps.metadata.outputs.pr-number }} + RUN_ID: ${{ steps.metadata.outputs.run-id }} + run: | + cd benchcoin-tools + + # Build network arguments + NETWORK_ARGS="" + for network in ${NETWORKS//,/ }; do + if [ -d "../${network}-results" ]; then + NETWORK_ARGS="${NETWORK_ARGS} --network ${network}:../${network}-results" + fi + done + + # Generate report + python3 bench.py report \ + ${NETWORK_ARGS} \ + --pr-number "${PR_NUMBER}" \ + --run-id "${RUN_ID}" \ + --update-index \ + "../results/pr-${PR_NUMBER}/${RUN_ID}" + + # Read speedups from generated results.json (filter for uninstrumented runs: *-false) + SPEEDUPS=$(jq -r '.speedups | to_entries | 
map(select(.key | endswith("-false"))) | map("\(.key): \(.value)%") | join(", ")' "../results/pr-${PR_NUMBER}/${RUN_ID}/results.json") + echo "speedups=${SPEEDUPS}" >> $GITHUB_OUTPUT + + RESULT_URL="https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/results/pr-${PR_NUMBER}/${RUN_ID}/index.html" + echo "result-url=${RESULT_URL}" >> $GITHUB_OUTPUT + + - name: Upload Pages artifact + uses: actions/upload-pages-artifact@v3 + with: + path: results + + - name: Commit and push to gh-pages + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "github-actions[bot]@users.noreply.github.com" + # Note: Only add results/ directory, not root index.html + # The root index.html is managed by nightly-benchmark.yml + git add results/ + git commit -m "Update benchmark results from run ${{ github.event.workflow_run.id }}" + git push origin gh-pages + + comment-pr: + needs: build + runs-on: ubuntu-latest + permissions: + pull-requests: write + actions: read + steps: + - name: Comment on PR + if: ${{ needs.build.outputs.pr-number != 'main' }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh pr comment ${{ needs.build.outputs.pr-number }} \ + --repo ${{ github.repository }} \ + --body "📊 Benchmark results for this run (${{ github.event.workflow_run.id }}) will be available at: ${{ needs.build.outputs.result-url }} after the github pages \"build and deployment\" action has completed. 
+ 🚀 Speedups: ${{ needs.build.outputs.speedups }}" diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml new file mode 100644 index 000000000000..bd721c123302 --- /dev/null +++ b/.github/workflows/rebase.yml @@ -0,0 +1,35 @@ +name: Nightly Rebase + +on: + schedule: + - cron: '0 5 * * *' # 05:00 GMT daily + workflow_dispatch: # manual trigger + +permissions: + contents: write + +jobs: + rebase: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: master + fetch-depth: 0 # Full history needed for rebase + token: ${{ secrets.REBASE_PAT }} + + - name: Configure git + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + - name: Add upstream and rebase + run: | + git remote add upstream https://github.com/bitcoin/bitcoin.git + git fetch upstream master + git rebase upstream/master + + - name: Push changes + run: | + git push --force origin master diff --git a/bench.py b/bench.py new file mode 100755 index 000000000000..2c3b9240e768 --- /dev/null +++ b/bench.py @@ -0,0 +1,655 @@ +#!/usr/bin/env python3 +"""Benchcoin - Bitcoin Core benchmarking toolkit. + +A CLI for building, benchmarking, analyzing, and reporting on Bitcoin Core +performance. + +Usage: + bench.py build COMMIT[:NAME]... Build bitcoind at one or more commits + bench.py run NAME:BINARY... Benchmark one or more binaries + bench.py analyze COMMIT LOGFILE Generate plots from debug.log + bench.py compare RESULTS... Compare benchmark results + bench.py report INPUT OUTPUT Generate HTML report + bench.py nightly append ... Append result to nightly history + bench.py nightly chart ... 
Generate nightly chart HTML + +Examples: + # Build two commits + bench.py build HEAD~1:before HEAD:after + + # Benchmark built binaries + bench.py run before:./binaries/before/bitcoind after:./binaries/after/bitcoind --datadir /data + + # Compare results + bench.py compare ./bench-output/results.json + + # Generate HTML report + bench.py report ./bench-output ./report + + # Append nightly result and regenerate chart + bench.py nightly append results.json abc123 default 450 + bench.py nightly chart ./index.html +""" + +from __future__ import annotations + +import argparse +import logging +import sys +from pathlib import Path + +from bench.capabilities import detect_capabilities +from bench.config import build_config + +logging.basicConfig( + level=logging.INFO, + format="%(levelname)s: %(message)s", +) +logger = logging.getLogger(__name__) + + +def cmd_build(args: argparse.Namespace) -> int: + """Build bitcoind at one or more commits.""" + from bench.build import BuildPhase + + capabilities = detect_capabilities() + config = build_config( + cli_args={ + "binaries_dir": args.output_dir, + "skip_existing": args.skip_existing, + "dry_run": args.dry_run, + "verbose": args.verbose, + }, + config_file=Path(args.config) if args.config else None, + profile=args.profile, + ) + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + phase = BuildPhase(config, capabilities) + + try: + result = phase.run( + args.commits, + output_dir=Path(args.output_dir) if args.output_dir else None, + ) + logger.info(f"Built {len(result.binaries)} binary(ies):") + for binary in result.binaries: + logger.info(f" {binary.name}: {binary.path}") + return 0 + except Exception as e: + logger.error(f"Build failed: {e}") + return 1 + + +def cmd_run(args: argparse.Namespace) -> int: + """Run benchmark on one or more binaries.""" + from bench.benchmark import BenchmarkPhase, parse_binary_spec + from bench.benchmark_config import BenchmarkConfig + + capabilities = detect_capabilities() + + 
# Load benchmark config + benchmark_config = BenchmarkConfig.from_toml(Path(args.benchmark_config)) + + # Validate benchmark config + errors = benchmark_config.validate() + if errors: + for error in errors: + logger.error(f"Config error: {error}") + return 1 + + # Get matrix entry + matrix_entry = benchmark_config.get_matrix_entry(args.matrix_entry) + if not matrix_entry: + available = benchmark_config.get_matrix_names() + logger.error( + f"Matrix entry '{args.matrix_entry}' not found. " + f"Available: {', '.join(available)}" + ) + return 1 + logger.info(f"Using matrix entry: {matrix_entry}") + + # Build config with CLI args and benchmark config values + cli_args: dict = { + "datadir": args.datadir, + "tmp_datadir": args.tmp_datadir, + "output_dir": args.output_dir, + "no_cache_drop": args.no_cache_drop, + "dry_run": args.dry_run, + "verbose": args.verbose, + "runs": benchmark_config.runs, + } + + # Apply matrix entry values + if "dbcache" in matrix_entry: + cli_args["dbcache"] = matrix_entry["dbcache"] + if "instrumented" in matrix_entry: + cli_args["instrumented"] = matrix_entry["instrumented"] + + config = build_config( + cli_args=cli_args, + config_file=Path(args.config) if args.config else None, + profile=args.profile, + ) + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + errors = config.validate() + if errors: + for error in errors: + logger.error(error) + return 1 + + # Parse binary specs + try: + binaries = [parse_binary_spec(spec) for spec in args.binaries] + except ValueError as e: + logger.error(str(e)) + return 1 + + # Validate binaries exist + for name, path in binaries: + if not path.exists(): + logger.error(f"Binary not found: {path} ({name})") + return 1 + + phase = BenchmarkPhase(config, capabilities, benchmark_config) + output_dir = Path(config.output_dir) + + try: + result = phase.run( + binaries=binaries, + datadir=Path(config.datadir) if config.datadir else None, + output_dir=output_dir, + ) + logger.info(f"Results saved 
to: {result.results_file}") + + # For instrumented runs, also generate plots + if config.instrumented: + from bench.analyze import AnalyzePhase + + analyze_phase = AnalyzePhase() + + for binary_result in result.binaries: + if binary_result.debug_log: + try: + analyze_phase.run( + commit=binary_result.name, + log_file=binary_result.debug_log, + output_dir=output_dir / "plots", + ) + except Exception as e: + logger.warning(f"Analysis for {binary_result.name} failed: {e}") + + return 0 + except Exception as e: + logger.error(f"Benchmark failed: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + return 1 + + +def cmd_compare(args: argparse.Namespace) -> int: + """Compare benchmark results from multiple files.""" + from bench.compare import ComparePhase + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + results_files = [Path(f) for f in args.results_files] + + # Validate files exist + for f in results_files: + if not f.exists(): + logger.error(f"Results file not found: {f}") + return 1 + + phase = ComparePhase() + + try: + result = phase.run(results_files, baseline=args.baseline) + + # Output results + output_json = phase.to_json(result) + + if args.output: + output_path = Path(args.output) + output_path.write_text(output_json) + logger.info(f"Comparison saved to: {output_path}") + else: + print(output_json) + + return 0 + except Exception as e: + logger.error(f"Comparison failed: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + return 1 + + +def cmd_analyze(args: argparse.Namespace) -> int: + """Generate plots from debug.log.""" + from bench.analyze import AnalyzePhase + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + log_file = Path(args.log_file) + output_dir = Path(args.output_dir) + + if not log_file.exists(): + logger.error(f"Log file not found: {log_file}") + return 1 + + phase = AnalyzePhase() + + try: + result = phase.run( + commit=args.commit, + log_file=log_file, + 
output_dir=output_dir, + ) + logger.info(f"Generated {len(result.plots)} plots in {result.output_dir}") + return 0 + except Exception as e: + logger.error(f"Analysis failed: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + return 1 + + +def cmd_report(args: argparse.Namespace) -> int: + """Generate HTML report from benchmark results.""" + from bench.report import ReportPhase + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + output_dir = Path(args.output_dir) + phase = ReportPhase() + + try: + # CI multi-network mode + if args.networks: + network_dirs = {} + for spec in args.networks: + if ":" not in spec: + logger.error(f"Invalid network spec '{spec}': must be NETWORK:PATH") + return 1 + network, path = spec.split(":", 1) + network_dirs[network] = Path(path) + + # Validate directories exist + for network, path in network_dirs.items(): + if not path.exists(): + logger.error(f"Network directory not found: {path} ({network})") + return 1 + + result = phase.run_multi_network( + network_dirs=network_dirs, + output_dir=output_dir, + title=args.title or "Benchmark Results", + pr_number=args.pr_number, + run_id=args.run_id, + ) + + # Update results index if we have a results directory + # Note: This writes to results/index.html, not the main index.html + # The main index.html is generated by the nightly benchmark chart + if args.update_index: + results_base = output_dir.parent.parent # Go up from pr-N/run-id + if results_base.exists(): + phase.update_index(results_base, results_base / "index.html") + else: + # Standard single-directory mode + input_dir = Path(args.input_dir) + + if not input_dir.exists(): + logger.error(f"Input directory not found: {input_dir}") + return 1 + + result = phase.run( + input_dir=input_dir, + output_dir=output_dir, + title=args.title or "Benchmark Results", + ) + + # Print speedups + if result.speedups: + logger.info("Speedups:") + for network, speedup in result.speedups.items(): + sign = "+" if 
speedup > 0 else "" + logger.info(f" {network}: {sign}{speedup}%") + + return 0 + except Exception as e: + logger.error(f"Report generation failed: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + return 1 + + +def cmd_nightly(args: argparse.Namespace) -> int: + """Manage nightly benchmark history and charts.""" + from bench.nightly import NightlyPhase + + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + if not args.nightly_command: + logger.error("No nightly subcommand specified. Use 'append' or 'chart'.") + return 1 + + history_file = Path(args.history_file) + phase = NightlyPhase(history_file) + + try: + if args.nightly_command == "append": + benchmark_config_file = ( + Path(args.benchmark_config) if args.benchmark_config else None + ) + phase.append( + results_file=Path(args.results_file), + commit=args.commit, + config=args.config, + dbcache=args.dbcache, + date_str=args.date, + capture_machine=args.capture_machine, + benchmark_config_file=benchmark_config_file, + ) + logger.info(f"Appended result to {history_file}") + elif args.nightly_command == "chart": + phase.chart(output_file=Path(args.output_file)) + logger.info(f"Generated chart at {args.output_file}") + return 0 + except Exception as e: + logger.error(f"Nightly operation failed: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + return 1 + + +def main() -> int: + """Main entry point.""" + parser = argparse.ArgumentParser( + description="Benchcoin - Bitcoin Core benchmarking toolkit", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=__doc__, + ) + + parser.add_argument( + "--config", + metavar="PATH", + help="Config file (default: bench.toml)", + ) + parser.add_argument( + "--profile", + choices=["quick", "full", "ci"], + default="full", + help="Configuration profile (default: full)", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Verbose output", + ) + parser.add_argument( + "--dry-run", + 
action="store_true", + help="Show what would be done without executing", + ) + + subparsers = parser.add_subparsers(dest="command", help="Commands") + + # Build command + build_parser = subparsers.add_parser( + "build", + help="Build bitcoind at one or more commits", + description="Build bitcoind binaries from git commits. " + "Each commit can optionally have a name suffix: COMMIT:NAME", + ) + build_parser.add_argument( + "commits", + nargs="+", + metavar="COMMIT[:NAME]", + help="Commit(s) to build. Format: COMMIT or COMMIT:NAME (e.g., HEAD:latest, abc123:v27)", + ) + build_parser.add_argument( + "-o", + "--output-dir", + metavar="PATH", + help="Where to store binaries (default: ./binaries)", + ) + build_parser.add_argument( + "--skip-existing", + action="store_true", + help="Skip build if binary already exists", + ) + build_parser.set_defaults(func=cmd_build) + + # Run command + run_parser = subparsers.add_parser( + "run", + help="Run benchmark on one or more binaries", + description="Benchmark bitcoind binaries using hyperfine. " + "Each binary must have a name and path: NAME:PATH", + ) + run_parser.add_argument( + "binaries", + nargs="+", + metavar="NAME:PATH", + help="Binary(ies) to benchmark. 
Format: NAME:PATH (e.g., v27:./binaries/v27/bitcoind)", + ) + run_parser.add_argument( + "--datadir", + metavar="PATH", + help="Source datadir with blockchain snapshot (omit for fresh sync)", + ) + run_parser.add_argument( + "--tmp-datadir", + metavar="PATH", + help="Temp datadir for benchmark runs", + ) + run_parser.add_argument( + "-o", + "--output-dir", + metavar="PATH", + help="Output directory for results (default: ./bench-output)", + ) + run_parser.add_argument( + "--no-cache-drop", + action="store_true", + help="Skip cache dropping between runs", + ) + run_parser.add_argument( + "--benchmark-config", + required=True, + metavar="PATH", + help="Benchmark config TOML file (e.g., bench/configs/pr.toml)", + ) + run_parser.add_argument( + "--matrix-entry", + required=True, + metavar="NAME", + help="Matrix entry to run (e.g., 'default', 'large-instrumented')", + ) + run_parser.set_defaults(func=cmd_run) + + # Analyze command + analyze_parser = subparsers.add_parser( + "analyze", help="Generate plots from debug.log" + ) + analyze_parser.add_argument("commit", help="Commit hash (for naming)") + analyze_parser.add_argument("log_file", help="Path to debug.log") + analyze_parser.add_argument( + "--output-dir", + default="./plots", + metavar="PATH", + help="Output directory for plots", + ) + analyze_parser.set_defaults(func=cmd_analyze) + + # Compare command + compare_parser = subparsers.add_parser( + "compare", + help="Compare benchmark results from multiple files", + description="Load and compare results from one or more results.json files. 
" + "Calculates speedup percentages relative to a baseline.", + ) + compare_parser.add_argument( + "results_files", + nargs="+", + metavar="RESULTS_FILE", + help="results.json file(s) to compare", + ) + compare_parser.add_argument( + "--baseline", + metavar="NAME", + help="Name of the baseline entry (default: first entry)", + ) + compare_parser.add_argument( + "-o", + "--output", + metavar="FILE", + help="Output file for comparison JSON (default: stdout)", + ) + compare_parser.set_defaults(func=cmd_compare) + + # Report command + report_parser = subparsers.add_parser( + "report", + help="Generate HTML report", + description="Generate HTML report from benchmark results. " + "Use --network for multi-network CI reports.", + ) + report_parser.add_argument( + "input_dir", + nargs="?", + help="Directory with results.json (for single-network mode)", + ) + report_parser.add_argument("output_dir", help="Output directory for report") + report_parser.add_argument( + "--title", + help="Report title", + ) + # CI multi-network options + report_parser.add_argument( + "--network", + dest="networks", + action="append", + metavar="NAME:PATH", + help="Network results directory (repeatable, e.g., --network mainnet:./mainnet-results)", + ) + report_parser.add_argument( + "--pr-number", + metavar="N", + help="PR number (for CI reports)", + ) + report_parser.add_argument( + "--run-id", + metavar="ID", + help="Run ID (for CI reports)", + ) + report_parser.add_argument( + "--update-index", + action="store_true", + help="Update main index.html (for CI reports)", + ) + report_parser.set_defaults(func=cmd_report) + + # Nightly command + nightly_parser = subparsers.add_parser( + "nightly", + help="Manage nightly benchmark history and charts", + description="Commands for managing nightly benchmark results history " + "and generating the historical trend chart.", + ) + nightly_parser.add_argument( + "--history-file", + default="nightly-history.json", + metavar="PATH", + help="Path to nightly 
history JSON file (default: nightly-history.json)", + ) + nightly_subparsers = nightly_parser.add_subparsers( + dest="nightly_command", help="Nightly commands" + ) + + # nightly append + nightly_append = nightly_subparsers.add_parser( + "append", + help="Append a result to the nightly history", + description="Parse a hyperfine results.json file and append the result " + "to the nightly history JSON file.", + ) + nightly_append.add_argument( + "results_file", + help="Path to hyperfine results.json file", + ) + nightly_append.add_argument( + "commit", + help="Git commit hash", + ) + nightly_append.add_argument( + "config", + help="Configuration name (e.g., '450', '32000' from matrix entry)", + ) + nightly_append.add_argument( + "dbcache", + type=int, + help="DB cache size in MB (450 or 32000)", + ) + nightly_append.add_argument( + "--date", + metavar="YYYY-MM-DD", + help="Date for this result (default: today)", + ) + nightly_append.add_argument( + "--capture-machine", + action="store_true", + help="Detect and store machine specs (CPU, architecture, disk type)", + ) + nightly_append.add_argument( + "--benchmark-config", + metavar="PATH", + help="Benchmark config TOML file to store with results", + ) + + # nightly chart + nightly_chart = nightly_subparsers.add_parser( + "chart", + help="Generate the nightly trend chart HTML", + description="Generate an HTML page with an interactive Plotly chart " + "showing nightly benchmark results over time.", + ) + nightly_chart.add_argument( + "output_file", + help="Path to write the chart HTML (typically index.html)", + ) + + nightly_parser.set_defaults(func=cmd_nightly) + + args = parser.parse_args() + + if not args.command: + parser.print_help() + return 1 + + return args.func(args) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/bench/README.md b/bench/README.md new file mode 100644 index 000000000000..ca0d011303de --- /dev/null +++ b/bench/README.md @@ -0,0 +1,234 @@ +# Benchcoin + +A CLI for benchmarking 
Bitcoin Core IBD. + +## Quick Start + +```bash +# Quick smoke test on signet (requires nix) +nix develop --command python3 bench.py --profile quick full \ + --chain signet --datadir /path/to/signet/datadir HEAD~1 HEAD + +# Or use just (wraps nix develop) +just quick HEAD~1 HEAD /path/to/signet/datadir +``` + +## Requirements + +- **Nix** with flakes enabled (provides hyperfine, flamegraph, etc.) +- A blockchain datadir snapshot to benchmark against +- Two git commits to compare + +Optional (auto-detected, gracefully degrades without): +- `/run/wrappers/bin/drop-caches` (NixOS) - clears page cache between runs + +## Commands + +``` +bench.py [GLOBAL_OPTIONS] COMMAND [OPTIONS] ARGS + +Global Options: + --profile {quick,full,ci} Configuration profile + --config PATH Custom config file + -v, --verbose Verbose output + --dry-run Show what would run + +Commands: + build Build bitcoind at two commits + run Run benchmark (requires pre-built binaries) + analyze Generate plots from debug.log + report Generate HTML report + full Complete pipeline: build β†’ run β†’ analyze +``` + +### build + +Build bitcoind binaries at two commits for comparison: + +```bash +python3 bench.py build HEAD~1 HEAD +python3 bench.py build --binaries-dir /tmp/bins abc123 def456 +python3 bench.py build --skip-existing HEAD~1 HEAD # reuse existing +``` + +### run + +Run hyperfine benchmark comparing two pre-built binaries: + +```bash +python3 bench.py run --datadir /data/snapshot HEAD~1 HEAD +python3 bench.py run --instrumented --datadir /data/snapshot HEAD~1 HEAD +``` + +Options: +- `--datadir PATH` - Source blockchain snapshot (required) +- `--tmp-datadir PATH` - Working directory (default: ./bench-output/tmp-datadir) +- `--stop-height N` - Block height to sync to +- `--dbcache N` - Database cache in MB +- `--runs N` - Number of iterations (default: 3, forced to 1 if instrumented) +- `--instrumented` - Enable flamegraph profiling and debug logging +- `--connect ADDR` - P2P node to sync from (empty 
= public network) +- `--chain {main,signet,testnet,regtest}` - Which chain +- `--no-cache-drop` - Don't clear page cache between runs + +### analyze + +Generate plots from a debug.log file: + +```bash +python3 bench.py analyze abc123 /path/to/debug.log --output-dir ./plots +``` + +Generates PNG plots for: +- Block height vs time +- Cache size vs height/time +- Transaction count vs height +- LevelDB compaction events +- CoinDB write batches + +### report + +Generate HTML report from benchmark results: + +```bash +python3 bench.py report ./bench-output ./report +``` + +### full + +Run complete pipeline (build + run + analyze if instrumented): + +```bash +python3 bench.py --profile quick full --chain signet --datadir /tmp/signet HEAD~1 HEAD +python3 bench.py --profile full full --datadir /data/mainnet HEAD~1 HEAD +``` + +## Profiles + +Profiles set sensible defaults for common scenarios: + +| Profile | stop_height | runs | dbcache | connect | +|---------|-------------|------|---------|---------| +| quick | 1,500 | 1 | 450 | (public network) | +| full | 855,000 | 3 | 450 | (public network) | +| ci | 855,000 | 3 | 450 | 148.251.128.115:33333 | + +Override any profile setting with CLI flags: + +```bash +python3 bench.py --profile quick full --stop-height 5000 --datadir ... HEAD~1 HEAD +``` + +## Configuration + +Configuration is layered (lowest to highest priority): + +1. Built-in defaults +2. `bench.toml` (in repo root) +3. Environment variables (`BENCH_DATADIR`, `BENCH_DBCACHE`, etc.) +4. 
CLI arguments + +### bench.toml + +```toml +[defaults] +chain = "main" +dbcache = 450 +stop_height = 855000 +runs = 3 + +[paths] +binaries_dir = "./binaries" +output_dir = "./bench-output" + +[profiles.quick] +stop_height = 1500 +runs = 1 +dbcache = 450 + +[profiles.ci] +connect = "148.251.128.115:33333" +``` + +### Environment Variables + +```bash +export BENCH_DATADIR=/data/snapshot +export BENCH_DBCACHE=1000 +export BENCH_STOP_HEIGHT=100000 +``` + +## Justfile Recipes + +The justfile wraps common operations with `nix develop`: + +```bash +just quick HEAD~1 HEAD /path/to/datadir # Quick signet test +just full HEAD~1 HEAD /path/to/datadir # Full mainnet benchmark +just instrumented HEAD~1 HEAD /path/to/datadir # With flamegraphs +just build HEAD~1 HEAD # Build only +just run HEAD~1 HEAD /path/to/datadir # Run only (binaries must exist) +``` + +## Architecture + +``` +bench.py CLI entry point (argparse) +bench/ +β”œβ”€β”€ config.py Layered configuration (TOML + env + CLI) +β”œβ”€β”€ capabilities.py System capability detection +β”œβ”€β”€ build.py Build phase (nix build) +β”œβ”€β”€ benchmark.py Benchmark phase (hyperfine) +β”œβ”€β”€ analyze.py Plot generation (matplotlib) +β”œβ”€β”€ report.py HTML report generation +└── utils.py Git operations, datadir management +``` + +### Capability Detection + +The tool auto-detects system capabilities and gracefully degrades: + +```python +from bench.capabilities import detect_capabilities +caps = detect_capabilities() +# caps.has_hyperfine, caps.can_drop_caches, etc. +``` + +Missing optional features emit warnings but don't fail: + +``` +WARNING: drop-caches not available - cache won't be cleared between runs +``` + +Missing required features (hyperfine, flamegraph for instrumented) cause errors. 
+ +### Hyperfine Integration + +The benchmark phase generates temporary shell scripts for hyperfine hooks: + +- `setup` - Clean tmp datadir (once before all runs) +- `prepare` - Copy snapshot, drop caches, clean logs (before each run) +- `cleanup` - Clean tmp datadir (after all runs per command) +- `conclude` - Collect flamegraph/logs (instrumented only, after each run) + +### Instrumented Mode + +When `--instrumented` is set: + +1. Wraps bitcoind in `flamegraph` for CPU profiling +2. Enables debug logging: `-debug=coindb -debug=leveldb -debug=bench -debug=validation` +3. Forces `runs=1` (profiling overhead makes multiple runs pointless) +4. Generates flamegraph SVGs and performance plots + +## CI Integration + +GitHub Actions workflows call bench.py directly (already in nix develop): + +```yaml +- run: | + nix develop --command python3 bench.py build \ + --binaries-dir ${{ runner.temp }}/binaries \ + $BASE_SHA $HEAD_SHA +``` + +CI-specific paths and the dedicated sync node are configured via `--profile ci`. diff --git a/bench/__init__.py b/bench/__init__.py new file mode 100644 index 000000000000..cb50424b155c --- /dev/null +++ b/bench/__init__.py @@ -0,0 +1,3 @@ +"""Benchcoin - Bitcoin Core benchmarking toolkit.""" + +__version__ = "0.1.0" diff --git a/bench/analyze.py b/bench/analyze.py new file mode 100644 index 000000000000..baedd97d745c --- /dev/null +++ b/bench/analyze.py @@ -0,0 +1,538 @@ +"""Analyze phase - parse debug.log and generate performance plots. + +Refactored from bench-ci/parse_and_plot.py for better structure and reusability. 
+""" + +from __future__ import annotations + +import datetime +import logging +import re +from collections import OrderedDict +from dataclasses import dataclass +from pathlib import Path + +# matplotlib is optional - gracefully handle if not installed +try: + import matplotlib.pyplot as plt + + HAS_MATPLOTLIB = True +except ImportError: + HAS_MATPLOTLIB = False + +logger = logging.getLogger(__name__) + +# Bitcoin fork heights for plot annotations +FORK_HEIGHTS = OrderedDict( + [ + ("BIP34", 227931), # Block v2, coinbase includes height + ("BIP66", 363725), # Strict DER signatures + ("BIP65", 388381), # OP_CHECKLOCKTIMEVERIFY + ("CSV", 419328), # BIP68, 112, 113 - OP_CHECKSEQUENCEVERIFY + ("Segwit", 481824), # BIP141, 143, 144, 145 - Segregated Witness + ("Taproot", 709632), # BIP341, 342 - Schnorr signatures & Taproot + ("Halving 1", 210000), # First halving + ("Halving 2", 420000), # Second halving + ("Halving 3", 630000), # Third halving + ("Halving 4", 840000), # Fourth halving + ] +) + +FORK_COLORS = { + "BIP34": "blue", + "BIP66": "blue", + "BIP65": "blue", + "CSV": "blue", + "Segwit": "green", + "Taproot": "red", + "Halving 1": "purple", + "Halving 2": "purple", + "Halving 3": "purple", + "Halving 4": "purple", +} + +FORK_STYLES = { + "BIP34": "--", + "BIP66": "--", + "BIP65": "--", + "CSV": "--", + "Segwit": "--", + "Taproot": "--", + "Halving 1": ":", + "Halving 2": ":", + "Halving 3": ":", + "Halving 4": ":", +} + + +@dataclass +class UpdateTipEntry: + """Parsed UpdateTip log entry.""" + + timestamp: datetime.datetime + height: int + tx_count: int + cache_size_mb: float + cache_coins_count: int + + +@dataclass +class LevelDBCompactEntry: + """Parsed LevelDB compaction log entry.""" + + timestamp: datetime.datetime + + +@dataclass +class LevelDBGenTableEntry: + """Parsed LevelDB generated table log entry.""" + + timestamp: datetime.datetime + keys_count: int + bytes_count: int + + +@dataclass +class ValidationTxAddEntry: + """Parsed validation transaction 
added log entry.""" + + timestamp: datetime.datetime + + +@dataclass +class CoinDBWriteBatchEntry: + """Parsed coindb write batch log entry.""" + + timestamp: datetime.datetime + is_partial: bool + size_mb: float + + +@dataclass +class CoinDBCommitEntry: + """Parsed coindb commit log entry.""" + + timestamp: datetime.datetime + txout_count: int + + +@dataclass +class ParsedLog: + """All parsed data from a debug.log file.""" + + update_tip: list[UpdateTipEntry] + leveldb_compact: list[LevelDBCompactEntry] + leveldb_gen_table: list[LevelDBGenTableEntry] + validation_txadd: list[ValidationTxAddEntry] + coindb_write_batch: list[CoinDBWriteBatchEntry] + coindb_commit: list[CoinDBCommitEntry] + + +@dataclass +class AnalyzeResult: + """Result of the analyze phase.""" + + commit: str + output_dir: Path + plots: list[Path] + + +class LogParser: + """Parse bitcoind debug.log files.""" + + # Regex patterns + UPDATETIP_RE = re.compile( + r"^([\d\-:TZ]+) UpdateTip: new best.+height=(\d+).+tx=(\d+).+cache=([\d.]+)MiB\((\d+)txo\)" + ) + LEVELDB_COMPACT_RE = re.compile(r"^([\d\-:TZ]+) \[leveldb] Compacting.*files") + LEVELDB_GEN_TABLE_RE = re.compile( + r"^([\d\-:TZ]+) \[leveldb] Generated table.*: (\d+) keys, (\d+) bytes" + ) + VALIDATION_TXADD_RE = re.compile( + r"^([\d\-:TZ]+) \[validation] TransactionAddedToMempool: txid=.+wtxid=.+" + ) + COINDB_WRITE_BATCH_RE = re.compile( + r"^([\d\-:TZ]+) \[coindb] Writing (partial|final) batch of ([\d.]+) MiB" + ) + COINDB_COMMIT_RE = re.compile( + r"^([\d\-:TZ]+) \[coindb] Committed (\d+) changed transaction outputs" + ) + + @staticmethod + def parse_timestamp(iso_str: str) -> datetime.datetime: + """Parse ISO 8601 timestamp from log.""" + return datetime.datetime.strptime(iso_str, "%Y-%m-%dT%H:%M:%SZ") + + def parse_file(self, log_file: Path) -> ParsedLog: + """Parse a debug.log file and extract all relevant data.""" + update_tip: list[UpdateTipEntry] = [] + leveldb_compact: list[LevelDBCompactEntry] = [] + leveldb_gen_table: 
list[LevelDBGenTableEntry] = [] + validation_txadd: list[ValidationTxAddEntry] = [] + coindb_write_batch: list[CoinDBWriteBatchEntry] = [] + coindb_commit: list[CoinDBCommitEntry] = [] + + with open(log_file, "r", encoding="utf-8") as f: + for line in f: + if match := self.UPDATETIP_RE.match(line): + iso_str, height, tx, cache_mb, coins = match.groups() + update_tip.append( + UpdateTipEntry( + timestamp=self.parse_timestamp(iso_str), + height=int(height), + tx_count=int(tx), + cache_size_mb=float(cache_mb), + cache_coins_count=int(coins), + ) + ) + elif match := self.LEVELDB_COMPACT_RE.match(line): + leveldb_compact.append( + LevelDBCompactEntry( + timestamp=self.parse_timestamp(match.group(1)) + ) + ) + elif match := self.LEVELDB_GEN_TABLE_RE.match(line): + iso_str, keys, bytes_count = match.groups() + leveldb_gen_table.append( + LevelDBGenTableEntry( + timestamp=self.parse_timestamp(iso_str), + keys_count=int(keys), + bytes_count=int(bytes_count), + ) + ) + elif match := self.VALIDATION_TXADD_RE.match(line): + validation_txadd.append( + ValidationTxAddEntry( + timestamp=self.parse_timestamp(match.group(1)) + ) + ) + elif match := self.COINDB_WRITE_BATCH_RE.match(line): + iso_str, batch_type, size_mb = match.groups() + coindb_write_batch.append( + CoinDBWriteBatchEntry( + timestamp=self.parse_timestamp(iso_str), + is_partial=(batch_type == "partial"), + size_mb=float(size_mb), + ) + ) + elif match := self.COINDB_COMMIT_RE.match(line): + iso_str, txout_count = match.groups() + coindb_commit.append( + CoinDBCommitEntry( + timestamp=self.parse_timestamp(iso_str), + txout_count=int(txout_count), + ) + ) + + return ParsedLog( + update_tip=update_tip, + leveldb_compact=leveldb_compact, + leveldb_gen_table=leveldb_gen_table, + validation_txadd=validation_txadd, + coindb_write_batch=coindb_write_batch, + coindb_commit=coindb_commit, + ) + + +class PlotGenerator: + """Generate performance plots from parsed log data.""" + + def __init__(self, commit: str, output_dir: Path): 
+ self.commit = commit + self.output_dir = output_dir + self.generated_plots: list[Path] = [] + + if not HAS_MATPLOTLIB: + raise RuntimeError( + "matplotlib is required for plot generation. " + "Install with: pip install matplotlib" + ) + + def generate_all(self, data: ParsedLog) -> list[Path]: + """Generate all plots from parsed data.""" + if not data.update_tip: + logger.warning("No UpdateTip entries found, skipping plot generation") + return [] + + # Verify entries are sorted by time + for i in range(len(data.update_tip) - 1): + if data.update_tip[i].timestamp > data.update_tip[i + 1].timestamp: + logger.warning("UpdateTip entries are not sorted by time") + break + + # Extract base time for elapsed calculations + base_time = data.update_tip[0].timestamp + + # Extract data series + times = [e.timestamp for e in data.update_tip] + heights = [e.height for e in data.update_tip] + tx_counts = [e.tx_count for e in data.update_tip] + cache_sizes = [e.cache_size_mb for e in data.update_tip] + cache_counts = [e.cache_coins_count for e in data.update_tip] + elapsed_minutes = [(t - base_time).total_seconds() / 60 for t in times] + + # Generate core plots + self._plot( + elapsed_minutes, + heights, + "Elapsed minutes", + "Block Height", + "Block Height vs Time", + f"{self.commit}-height_vs_time.png", + ) + + self._plot( + heights, + cache_sizes, + "Block Height", + "Cache Size (MiB)", + "Cache Size vs Block Height", + f"{self.commit}-cache_vs_height.png", + is_height_based=True, + ) + + self._plot( + elapsed_minutes, + cache_sizes, + "Elapsed minutes", + "Cache Size (MiB)", + "Cache Size vs Time", + f"{self.commit}-cache_vs_time.png", + ) + + self._plot( + heights, + tx_counts, + "Block Height", + "Transaction Count", + "Transactions vs Block Height", + f"{self.commit}-tx_vs_height.png", + is_height_based=True, + ) + + self._plot( + heights, + cache_counts, + "Block Height", + "Coins Cache Size", + "Coins Cache Size vs Height", + f"{self.commit}-coins_cache_vs_height.png", 
+ is_height_based=True, + ) + + # LevelDB plots + if data.leveldb_compact: + compact_minutes = [ + (e.timestamp - base_time).total_seconds() / 60 + for e in data.leveldb_compact + ] + self._plot( + compact_minutes, + [1] * len(compact_minutes), + "Elapsed minutes", + "LevelDB Compaction", + "LevelDB Compaction Events vs Time", + f"{self.commit}-leveldb_compact_vs_time.png", + ) + + if data.leveldb_gen_table: + gen_minutes = [ + (e.timestamp - base_time).total_seconds() / 60 + for e in data.leveldb_gen_table + ] + gen_keys = [e.keys_count for e in data.leveldb_gen_table] + gen_bytes = [e.bytes_count for e in data.leveldb_gen_table] + + self._plot( + gen_minutes, + gen_keys, + "Elapsed minutes", + "Number of keys", + "LevelDB Keys Generated vs Time", + f"{self.commit}-leveldb_gen_keys_vs_time.png", + ) + + self._plot( + gen_minutes, + gen_bytes, + "Elapsed minutes", + "Number of bytes", + "LevelDB Bytes Generated vs Time", + f"{self.commit}-leveldb_gen_bytes_vs_time.png", + ) + + # Validation plots + if data.validation_txadd: + txadd_minutes = [ + (e.timestamp - base_time).total_seconds() / 60 + for e in data.validation_txadd + ] + self._plot( + txadd_minutes, + [1] * len(txadd_minutes), + "Elapsed minutes", + "Transaction Additions", + "Transaction Additions to Mempool vs Time", + f"{self.commit}-validation_txadd_vs_time.png", + ) + + # CoinDB plots + if data.coindb_write_batch: + batch_minutes = [ + (e.timestamp - base_time).total_seconds() / 60 + for e in data.coindb_write_batch + ] + batch_sizes = [e.size_mb for e in data.coindb_write_batch] + self._plot( + batch_minutes, + batch_sizes, + "Elapsed minutes", + "Batch Size MiB", + "Coin Database Partial/Final Write Batch Size vs Time", + f"{self.commit}-coindb_write_batch_size_vs_time.png", + ) + + if data.coindb_commit: + commit_minutes = [ + (e.timestamp - base_time).total_seconds() / 60 + for e in data.coindb_commit + ] + commit_txouts = [e.txout_count for e in data.coindb_commit] + self._plot( + commit_minutes, 
+ commit_txouts, + "Elapsed minutes", + "Transaction Output Count", + "Coin Database Transaction Output Committed vs Time", + f"{self.commit}-coindb_commit_txout_vs_time.png", + ) + + return self.generated_plots + + def _plot( + self, + x: list, + y: list, + x_label: str, + y_label: str, + title: str, + filename: str, + is_height_based: bool = False, + ) -> None: + """Generate a single plot.""" + if not x or not y: + logger.debug(f"Skipping plot '{title}' - no data") + return + + plt.figure(figsize=(30, 10)) + plt.plot(x, y) + plt.title(title, fontsize=20) + plt.xlabel(x_label, fontsize=16) + plt.ylabel(y_label, fontsize=16) + plt.grid(True) + + min_x, max_x = min(x), max(x) + if min_x < max_x: + plt.xlim(min_x, max_x) + + # Add fork markers for height-based plots + if is_height_based: + self._add_fork_markers(min_x, max_x, max(y)) + + plt.xticks(rotation=90, fontsize=12) + plt.yticks(fontsize=12) + plt.tight_layout() + + output_path = self.output_dir / filename + plt.savefig(output_path) + plt.close() + + self.generated_plots.append(output_path) + logger.info(f"Saved plot: {output_path}") + + def _add_fork_markers(self, min_x: float, max_x: float, max_y: float) -> None: + """Add vertical lines for Bitcoin forks.""" + text_positions = {} + position_increment = max_y * 0.05 + current_position = max_y * 0.9 + + for fork_name, height in FORK_HEIGHTS.items(): + if min_x <= height <= max_x: + plt.axvline( + x=height, + color=FORK_COLORS[fork_name], + linestyle=FORK_STYLES[fork_name], + ) + + if height in text_positions: + text_positions[height] -= position_increment + else: + text_positions[height] = current_position + current_position -= position_increment + if current_position < max_y * 0.1: + current_position = max_y * 0.9 + + plt.text( + height, + text_positions[height], + f"{fork_name} ({height})", + rotation=90, + verticalalignment="top", + color=FORK_COLORS[fork_name], + ) + + +class AnalyzePhase: + """Analyze benchmark results and generate plots.""" + + def run( 
+ self, + commit: str, + log_file: Path, + output_dir: Path, + ) -> AnalyzeResult: + """Analyze a debug.log and generate plots. + + Args: + commit: Commit hash (for naming) + log_file: Path to debug.log + output_dir: Where to save plots + + Returns: + AnalyzeResult with paths to generated plots + """ + if not HAS_MATPLOTLIB: + raise RuntimeError( + "matplotlib is required for plot generation. " + "Install with: pip install matplotlib" + ) + + if not log_file.exists(): + raise FileNotFoundError(f"Log file not found: {log_file}") + + output_dir.mkdir(parents=True, exist_ok=True) + + logger.info(f"Parsing log file: {log_file}") + parser = LogParser() + data = parser.parse_file(log_file) + + # Log parsed data summary + logger.info(f" UpdateTip entries: {len(data.update_tip)}") + logger.info(f" LevelDB compact entries: {len(data.leveldb_compact)}") + logger.info(f" LevelDB gen table entries: {len(data.leveldb_gen_table)}") + logger.info(f" Validation txadd entries: {len(data.validation_txadd)}") + logger.info(f" CoinDB write batch entries: {len(data.coindb_write_batch)}") + logger.info(f" CoinDB commit entries: {len(data.coindb_commit)}") + + logger.info(f"Generating plots for {commit[:12]}") + logger.info(f" Output directory: {output_dir}") + generator = PlotGenerator(commit[:12], output_dir) + plots = generator.generate_all(data) + + logger.info(f"Generated {len(plots)} plots") + + return AnalyzeResult( + commit=commit, + output_dir=output_dir, + plots=plots, + ) diff --git a/bench/benchmark.py b/bench/benchmark.py new file mode 100644 index 000000000000..60dddb5db725 --- /dev/null +++ b/bench/benchmark.py @@ -0,0 +1,362 @@ +"""Benchmark phase - run hyperfine benchmarks on bitcoind binaries.""" + +from __future__ import annotations + +import logging +import os +import shutil +import subprocess +import tempfile +from dataclasses import dataclass, field +from pathlib import Path +from typing import TYPE_CHECKING + +from .patchelf import ensure_binary_runnable + +if 
TYPE_CHECKING: + from .benchmark_config import BenchmarkConfig + from .capabilities import Capabilities + from .config import Config + + +logger = logging.getLogger(__name__) + +# Debug flags for instrumented mode +INSTRUMENTED_DEBUG_FLAGS = ["coindb", "leveldb", "bench", "validation"] + + +@dataclass +class BinaryResult: + """Result for a single binary.""" + + name: str + flamegraph: Path | None = None + debug_log: Path | None = None + + +@dataclass +class BenchmarkResult: + """Result of the benchmark phase.""" + + results_file: Path + instrumented: bool + binaries: list[BinaryResult] = field(default_factory=list) + + +def parse_binary_spec(spec: str) -> tuple[str, Path]: + """Parse a binary spec like 'name:/path/to/binary'. + + Returns (name, path). + """ + if ":" not in spec: + raise ValueError(f"Invalid binary spec '{spec}': must be NAME:PATH") + name, path_str = spec.split(":", 1) + if not name: + raise ValueError(f"Invalid binary spec '{spec}': name cannot be empty") + return name, Path(path_str) + + +class BenchmarkPhase: + """Run hyperfine benchmarks on bitcoind binaries.""" + + def __init__( + self, + config: Config, + capabilities: Capabilities, + benchmark_config: BenchmarkConfig | None = None, + ): + self.config = config + self.capabilities = capabilities + self.benchmark_config = benchmark_config + self._temp_scripts: list[Path] = [] + + def run( + self, + binaries: list[tuple[str, Path]], + datadir: Path | None, + output_dir: Path, + ) -> BenchmarkResult: + """Run benchmarks on given binaries. 
+ + Args: + binaries: List of (name, binary_path) tuples + datadir: Source datadir with blockchain snapshot (None for fresh sync) + output_dir: Where to store results + + Returns: + BenchmarkResult with paths to outputs + """ + if not binaries: + raise ValueError("At least one binary is required") + + # Validate all binaries exist + for name, path in binaries: + if not path.exists(): + raise FileNotFoundError(f"Binary not found: {path} ({name})") + + # Ensure binaries can run on this system (patches guix binaries on NixOS) + for name, path in binaries: + if not ensure_binary_runnable(path): + raise RuntimeError(f"Binary {name} at {path} cannot be made runnable") + + # Check prerequisites + errors = self.capabilities.check_for_run(self.config.instrumented) + if errors: + raise RuntimeError("Benchmark prerequisites not met:\n" + "\n".join(errors)) + + # Log warnings about missing optional capabilities + for warning in self.capabilities.get_warnings(): + logger.warning(warning) + + # Setup directories + output_dir.mkdir(parents=True, exist_ok=True) + tmp_datadir = Path(self.config.tmp_datadir) + tmp_datadir.mkdir(parents=True, exist_ok=True) + + results_file = output_dir / "results.json" + + logger.info("Starting benchmark") + logger.info(f" Output dir: {output_dir}") + logger.info(f" Temp datadir: {tmp_datadir}") + if datadir: + logger.info(f" Source datadir: {datadir}") + else: + logger.info(" Mode: Fresh sync (no source datadir)") + logger.info(f" Binaries: {len(binaries)}") + for name, path in binaries: + logger.info(f" {name}: {path}") + logger.info(f" Instrumented: {self.config.instrumented}") + logger.info(f" Runs: {self.config.runs}") + logger.info(f" dbcache: {self.config.dbcache}") + if self.benchmark_config: + logger.info(f" Config: {self.benchmark_config.source_file}") + + try: + # Create hook scripts for hyperfine + setup_script = self._create_setup_script(tmp_datadir) + prepare_script = self._create_prepare_script(tmp_datadir, datadir) + cleanup_script = 
self._create_cleanup_script(tmp_datadir) + + # Build hyperfine command + cmd = self._build_hyperfine_cmd( + binaries=binaries, + tmp_datadir=tmp_datadir, + results_file=results_file, + setup_script=setup_script, + prepare_script=prepare_script, + cleanup_script=cleanup_script, + output_dir=output_dir, + ) + + # Log the commands being benchmarked + logger.info("Commands to benchmark:") + for name, path in binaries: + bitcoind_cmd = self._build_bitcoind_cmd(path, tmp_datadir) + logger.info(f" {name}: {bitcoind_cmd}") + + if self.config.dry_run: + logger.info(f"[DRY RUN] Would run: {' '.join(cmd)}") + return BenchmarkResult( + results_file=results_file, + instrumented=self.config.instrumented, + ) + + # Log the full hyperfine command + logger.info("Running hyperfine...") + logger.info(f" Command: {' '.join(cmd[:7])} ...") # First few args + logger.debug(f" Full command: {' '.join(cmd)}") + subprocess.run(cmd, check=True) + + # Collect results + benchmark_result = BenchmarkResult( + results_file=results_file, + instrumented=self.config.instrumented, + ) + + # For instrumented runs, collect flamegraphs and debug logs + if self.config.instrumented: + logger.info("Collecting instrumented artifacts...") + for name, _path in binaries: + binary_result = BinaryResult(name=name) + + flamegraph_file = output_dir / f"{name}-flamegraph.svg" + debug_log_file = output_dir / f"{name}-debug.log" + + if flamegraph_file.exists(): + binary_result.flamegraph = flamegraph_file + logger.info(f" Flamegraph ({name}): {flamegraph_file}") + if debug_log_file.exists(): + binary_result.debug_log = debug_log_file + logger.info(f" Debug log ({name}): {debug_log_file}") + + benchmark_result.binaries.append(binary_result) + + # Clean up tmp_datadir + if tmp_datadir.exists(): + logger.debug(f"Cleaning up tmp_datadir: {tmp_datadir}") + shutil.rmtree(tmp_datadir) + + return benchmark_result + + finally: + # Clean up temp scripts + for script in self._temp_scripts: + if script.exists(): + 
script.unlink() + self._temp_scripts.clear() + + def _create_temp_script(self, commands: list[str], name: str) -> Path: + """Create a temporary shell script.""" + content = "#!/usr/bin/env bash\nset -euxo pipefail\n" + content += "\n".join(commands) + "\n" + + fd, path = tempfile.mkstemp(suffix=".sh", prefix=f"bench_{name}_") + os.write(fd, content.encode()) + os.close(fd) + os.chmod(path, 0o755) + + script_path = Path(path) + self._temp_scripts.append(script_path) + logger.debug(f"Created {name} script: {script_path}") + for cmd in commands: + logger.debug(f" {cmd}") + return script_path + + def _create_setup_script(self, tmp_datadir: Path) -> Path: + """Create setup script (runs once before all timing runs).""" + commands = [ + f'mkdir -p "{tmp_datadir}"', + f'rm -rf "{tmp_datadir}"/*', + ] + return self._create_temp_script(commands, "setup") + + def _create_prepare_script( + self, tmp_datadir: Path, original_datadir: Path | None + ) -> Path: + """Create prepare script (runs before each timing run).""" + commands = [ + f'rm -rf "{tmp_datadir}"/*', + ] + + # Copy datadir if provided (skip for fresh sync) + if original_datadir: + commands.append(f'cp -r "{original_datadir}"/* "{tmp_datadir}"') + + # Drop caches if available + if self.capabilities.can_drop_caches and not self.config.no_cache_drop: + commands.append(self.capabilities.drop_caches_path) + + # Clean debug logs + commands.append( + f'find "{tmp_datadir}" -name debug.log -delete 2>/dev/null || true' + ) + + return self._create_temp_script(commands, "prepare") + + def _create_cleanup_script(self, tmp_datadir: Path) -> Path: + """Create cleanup script (runs after all timing runs for each command).""" + commands = [ + f'rm -rf "{tmp_datadir}"/*', + ] + return self._create_temp_script(commands, "cleanup") + + def _build_bitcoind_cmd( + self, + binary: Path, + tmp_datadir: Path, + ) -> str: + """Build the bitcoind command string for hyperfine.""" + if not self.benchmark_config: + raise 
ValueError("benchmark_config is required") + + parts = [] + + # Add flamegraph wrapper for instrumented mode + if self.config.instrumented: + parts.append("flamegraph") + parts.append("--palette bitcoin") + parts.append("--title 'bitcoind IBD'") + parts.append("-c 'record -F 101 --call-graph fp'") + parts.append("--") + + # Bitcoind command + parts.append(str(binary)) + parts.append(f"-datadir={tmp_datadir}") + + # Add dbcache from matrix entry + parts.append(f"-dbcache={self.config.dbcache}") + + # Add all bitcoind args from benchmark config + for key, value in self.benchmark_config.bitcoind_args.items(): + formatted = self.benchmark_config._format_bitcoind_arg(key, value) + if formatted: + parts.append(formatted) + + # Debug flags for instrumented mode + if self.config.instrumented and self.benchmark_config.instrumented_debug: + for flag in self.benchmark_config.instrumented_debug: + parts.append(f"-debug={flag}") + + return " ".join(parts) + + def _build_hyperfine_cmd( + self, + binaries: list[tuple[str, Path]], + tmp_datadir: Path, + results_file: Path, + setup_script: Path, + prepare_script: Path, + cleanup_script: Path, + output_dir: Path, + ) -> list[str]: + """Build the hyperfine command.""" + cmd = [ + "hyperfine", + "--shell=bash", + f"--setup={setup_script}", + f"--prepare={prepare_script}", + f"--cleanup={cleanup_script}", + f"--runs={self.config.runs}", + f"--export-json={results_file}", + "--show-output", + ] + + # Add command names and build commands + for name, binary_path in binaries: + cmd.append(f"--command-name={name}") + + # Build the actual commands to benchmark + for name, binary_path in binaries: + bitcoind_cmd = self._build_bitcoind_cmd(binary_path, tmp_datadir) + + # For instrumented runs, append the conclude logic to each command + if self.config.instrumented: + conclude = self._create_conclude_commands(name, tmp_datadir, output_dir) + bitcoind_cmd += f" && {conclude}" + + cmd.append(bitcoind_cmd) + + return cmd + + def 
_create_conclude_commands( + self, + name: str, + tmp_datadir: Path, + output_dir: Path, + ) -> str: + """Create inline conclude commands for a specific binary.""" + # Return shell commands to run after each benchmark + commands = [] + + # Move flamegraph if exists + commands.append( + f'if [ -e flamegraph.svg ]; then mv flamegraph.svg "{output_dir}/{name}-flamegraph.svg"; fi' + ) + + # Copy debug log if exists + commands.append( + f'debug_log=$(find "{tmp_datadir}" -name debug.log -print -quit); ' + f'if [ -n "$debug_log" ]; then cp "$debug_log" "{output_dir}/{name}-debug.log"; fi' + ) + + return " && ".join(commands) diff --git a/bench/benchmark_config.py b/bench/benchmark_config.py new file mode 100644 index 000000000000..e5118248c6d1 --- /dev/null +++ b/bench/benchmark_config.py @@ -0,0 +1,249 @@ +"""Benchmark configuration from TOML files. + +Provides a portable, reproducible benchmark config that can be shared +to run identical benchmarks on different machines. +""" + +from __future__ import annotations + +import itertools +import logging +import tomllib +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +logger = logging.getLogger(__name__) + + +@dataclass +class BenchmarkConfig: + """Benchmark configuration loaded from TOML. + + This represents a portable benchmark specification that can be shared + to reproduce benchmarks on different machines. 
+ """ + + # Benchmark metadata + start_height: int + runs: int + + # Parameter matrix - each key maps to list of values + # These create multiple benchmark configurations + matrix: dict[str, list[Any]] = field(default_factory=dict) + + # All bitcoind flags (optional - empty/missing values excluded from command) + bitcoind_args: dict[str, Any] = field(default_factory=dict) + + # Instrumented mode debug flags + instrumented_debug: list[str] = field(default_factory=list) + + # Source file path (for reference) + source_file: Path | None = None + + @classmethod + def from_toml(cls, path: Path) -> BenchmarkConfig: + """Load configuration from a TOML file. + + Expected format: + [benchmark] + start_height = 840000 + runs = 2 + + [bitcoind] + stopatheight = 855000 + chain = "main" + connect = "..." + prune = 10000 + daemon = false + printtoconsole = false + + [bitcoind.matrix] + dbcache = [450, 32000] + instrumented = [false, true] + + [bitcoind.instrumented] + debug = ["coindb", "leveldb", "bench", "validation"] + """ + with open(path, "rb") as f: + data = tomllib.load(f) + + benchmark = data.get("benchmark", {}) + bitcoind = data.get("bitcoind", {}).copy() + + # Extract matrix from bitcoind section + matrix: dict[str, list[Any]] = bitcoind.pop("matrix", {}) + + # Extract instrumented debug flags (separate from regular bitcoind args) + instrumented = bitcoind.pop("instrumented", {}) + instrumented_debug = instrumented.get("debug", []) + + config = cls( + start_height=benchmark.get("start_height", 0), + runs=benchmark.get("runs", 3), + matrix=matrix, + bitcoind_args=bitcoind, + instrumented_debug=instrumented_debug, + source_file=path, + ) + + logger.info(f"Loaded benchmark config from {path}") + logger.info(f" Start height: {config.start_height}, Runs: {config.runs}") + if config.matrix: + logger.info(f" Matrix parameters: {list(config.matrix.keys())}") + if config.bitcoind_args: + logger.info(f" Bitcoind flags: {list(config.bitcoind_args.keys())}") + + return config + + 
@staticmethod + def _value_to_name(value: Any) -> str: + """Convert a matrix value to a name string.""" + if isinstance(value, bool): + return str(value).lower() + return str(value) + + def expand_matrix(self) -> list[dict[str, Any]]: + """Expand parameter matrix into list of configurations. + + Returns list of dicts, each containing: + - name: combined name from values like "450-false" + - All parameter values from the matrix + + Example: + matrix = { + 'dbcache': [450, 32000], + 'instrumented': [false, true] + } + + Returns: + [ + {'name': '450-false', 'dbcache': 450, 'instrumented': False}, + {'name': '450-true', 'dbcache': 450, 'instrumented': True}, + {'name': '32000-false', 'dbcache': 32000, 'instrumented': False}, + {'name': '32000-true', 'dbcache': 32000, 'instrumented': True}, + ] + """ + if not self.matrix: + return [{"name": "default"}] + + # Get all parameter names and their values + param_names = list(self.matrix.keys()) + param_values = [self.matrix[name] for name in param_names] + + # Generate all combinations + results = [] + for combination in itertools.product(*param_values): + entry: dict[str, Any] = {} + + # Build combined name from values + name_parts = [self._value_to_name(v) for v in combination] + entry["name"] = "-".join(name_parts) + + # Add each parameter value + for param_name, value in zip(param_names, combination): + entry[param_name] = value + + results.append(entry) + + return results + + def get_matrix_entry(self, name: str) -> dict[str, Any] | None: + """Get a specific matrix entry by its combined name. 
+ + Args: + name: Combined name like "default-uninstrumented" + + Returns: + Dict with parameter values, or None if not found + """ + for entry in self.expand_matrix(): + if entry["name"] == name: + return entry + return None + + def get_matrix_names(self) -> list[str]: + """Get list of all matrix entry names.""" + return [entry["name"] for entry in self.expand_matrix()] + + def _format_bitcoind_arg(self, key: str, value: Any) -> str | None: + """Format a single bitcoind argument, returning None if it should be skipped.""" + # Skip empty strings and None + if value is None or value == "": + return None + + # Format based on type + if isinstance(value, bool): + return f"-{key}={1 if value else 0}" + else: + return f"-{key}={value}" + + def generate_command_template(self) -> str: + """Generate bitcoind command template with placeholders. + + Placeholders use {param} format for matrix parameters. + Empty/missing bitcoind args are excluded. + + Returns command like: + bitcoind -datadir={datadir} -dbcache={dbcache} -stopatheight=855000 ... + """ + parts = ["bitcoind"] + + # Placeholder for datadir (always user-provided) + parts.append("-datadir={datadir}") + + # Matrix parameters as placeholders + for param_name in self.matrix.keys(): + if param_name != "instrumented": # instrumented is a flag, not a param + parts.append(f"-{param_name}={{{param_name}}}") + + # Bitcoind args from config (skip empty/missing) + for key, value in self.bitcoind_args.items(): + formatted = self._format_bitcoind_arg(key, value) + if formatted: + parts.append(formatted) + + return " ".join(parts) + + def get_bitcoind_arg(self, key: str, default: Any = None) -> Any: + """Get a bitcoind arg value, with optional default.""" + return self.bitcoind_args.get(key, default) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for JSON serialization. + + This captures the config for logging with results. 
+ """ + result: dict[str, Any] = { + "start_height": self.start_height, + "runs": self.runs, + "command_template": self.generate_command_template(), + } + + # Include non-empty bitcoind args + bitcoind = {k: v for k, v in self.bitcoind_args.items() if v not in (None, "")} + if bitcoind: + result["bitcoind"] = bitcoind + + # Include matrix definition + if self.matrix: + result["matrix"] = self.matrix + + return result + + def validate(self) -> list[str]: + """Validate configuration, return list of errors.""" + errors = [] + + if self.start_height < 0: + errors.append("start_height must be non-negative") + + if self.runs < 1: + errors.append("runs must be positive") + + # Validate matrix entries are non-empty lists + for param_name, values in self.matrix.items(): + if not values: + errors.append(f"matrix.{param_name} must have at least one value") + + return errors diff --git a/bench/build.py b/bench/build.py new file mode 100644 index 000000000000..6187263a73de --- /dev/null +++ b/bench/build.py @@ -0,0 +1,197 @@ +"""Build phase - compile bitcoind at specified commits.""" + +from __future__ import annotations + +import logging +import shutil +import subprocess +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .capabilities import Capabilities + from .config import Config + +from .utils import GitState, git_checkout, git_rev_parse + +logger = logging.getLogger(__name__) + + +@dataclass +class BuiltBinary: + """A single built binary.""" + + name: str + path: Path + commit: str + + +@dataclass +class BuildResult: + """Result of the build phase.""" + + binaries: list[BuiltBinary] + + +def parse_commit_spec(spec: str) -> tuple[str, str | None]: + """Parse a commit spec like 'abc123:name' or 'abc123'. + + Returns (commit, name) where name may be None. 
+ """ + if ":" in spec: + commit, name = spec.split(":", 1) + return commit, name + return spec, None + + +class BuildPhase: + """Build bitcoind binaries at specified commits.""" + + def __init__( + self, + config: Config, + capabilities: Capabilities, + repo_path: Path | None = None, + ): + self.config = config + self.capabilities = capabilities + self.repo_path = repo_path or Path.cwd() + + def run( + self, + commit_specs: list[str], + output_dir: Path | None = None, + ) -> BuildResult: + """Build bitcoind at given commits. + + Args: + commit_specs: List of commit specs like 'abc123:name' or 'abc123' + output_dir: Where to store binaries (default: ./binaries) + + Returns: + BuildResult with list of built binaries + """ + # Check prerequisites + errors = self.capabilities.check_for_build() + if errors: + raise RuntimeError("Build prerequisites not met:\n" + "\n".join(errors)) + + output_dir = output_dir or Path(self.config.binaries_dir) + + # Parse commit specs and resolve to full hashes + commits: list[tuple[str, str, str]] = [] # (commit_hash, name, original_spec) + for spec in commit_specs: + commit, name = parse_commit_spec(spec) + commit_hash = git_rev_parse(commit, self.repo_path) + # Default name to short hash if not provided + if name is None: + name = commit_hash[:12] + commits.append((commit_hash, name, spec)) + + logger.info(f"Building {len(commits)} binary(ies):") + for commit_hash, name, spec in commits: + logger.info(f" {name}: {commit_hash[:12]} ({spec})") + logger.info(f" Repo: {self.repo_path}") + logger.info(f" Output: {output_dir}") + + # Check if we can skip existing builds + binaries_to_build: list[ + tuple[str, str, Path] + ] = [] # (commit_hash, name, output_path) + for commit_hash, name, _spec in commits: + binary_dir = output_dir / name + binary_dir.mkdir(parents=True, exist_ok=True) + binary_path = binary_dir / "bitcoind" + + if self.config.skip_existing and binary_path.exists(): + logger.info(f" Skipping {name} - binary exists") + else: 
+ binaries_to_build.append((commit_hash, name, binary_path)) + + if not binaries_to_build: + logger.info("All binaries exist and --skip-existing set, skipping build") + return BuildResult( + binaries=[ + BuiltBinary( + name=name, + path=output_dir / name / "bitcoind", + commit=commit_hash, + ) + for commit_hash, name, _spec in commits + ] + ) + + # Save git state for restoration + git_state = GitState(self.repo_path) + git_state.save() + + built_binaries: list[BuiltBinary] = [] + + try: + for commit_hash, name, output_path in binaries_to_build: + self._build_commit(name, commit_hash, output_path) + built_binaries.append( + BuiltBinary(name=name, path=output_path, commit=commit_hash) + ) + + finally: + # Always restore git state + git_state.restore() + + # Include skipped binaries in result + all_binaries = [] + for commit_hash, name, _spec in commits: + binary_path = output_dir / name / "bitcoind" + all_binaries.append( + BuiltBinary(name=name, path=binary_path, commit=commit_hash) + ) + + return BuildResult(binaries=all_binaries) + + def _build_commit(self, name: str, commit: str, output_path: Path) -> None: + """Build bitcoind for a single commit.""" + logger.info(f"Building {name} ({commit[:12]})") + + if self.config.dry_run: + logger.info(f" [DRY RUN] Would build {commit[:12]} -> {output_path}") + return + + # Checkout the commit + logger.info(f" Checking out {commit[:12]}...") + git_checkout(commit, self.repo_path) + + # Build with nix + cmd = ["nix", "build", "-L"] + + logger.info(f" Running: {' '.join(cmd)}") + logger.info(f" Working directory: {self.repo_path}") + result = subprocess.run( + cmd, + cwd=self.repo_path, + ) + + if result.returncode != 0: + raise RuntimeError(f"Build failed for {name} ({commit[:12]})") + + # Copy binary to output location + nix_binary = self.repo_path / "result" / "bin" / "bitcoind" + if not nix_binary.exists(): + raise RuntimeError(f"Built binary not found at {nix_binary}") + + logger.info(f" Copying {nix_binary} -> 
{output_path}") + + # Remove existing binary if present (may be read-only from nix) + if output_path.exists(): + output_path.chmod(0o755) + output_path.unlink() + + shutil.copy2(nix_binary, output_path) + output_path.chmod(0o755) # Ensure it's executable and writable + logger.info(f" Built {name} binary: {output_path}") + + # Clean up nix result symlink + result_link = self.repo_path / "result" + if result_link.is_symlink(): + logger.debug(f" Removing nix result symlink: {result_link}") + result_link.unlink() diff --git a/bench/capabilities.py b/bench/capabilities.py new file mode 100644 index 000000000000..31b6bd59f05f --- /dev/null +++ b/bench/capabilities.py @@ -0,0 +1,117 @@ +"""System capability detection for graceful degradation. + +Detects available tools and features, allowing the benchmark to run +on systems without all capabilities (with appropriate warnings). +""" + +from __future__ import annotations + +import os +import shutil +from dataclasses import dataclass +from pathlib import Path + + +# Known paths for drop-caches on NixOS +DROP_CACHES_PATHS = [ + "/run/wrappers/bin/drop-caches", + "/usr/local/bin/drop-caches", +] + + +@dataclass +class Capabilities: + """Detected system capabilities.""" + + # Cache management + can_drop_caches: bool + drop_caches_path: str | None + + # Required tools + has_hyperfine: bool + has_flamegraph: bool + has_perf: bool + has_nix: bool + + # System info + cpu_count: int + is_nixos: bool + is_ci: bool + + def check_for_run(self, instrumented: bool = False) -> list[str]: + """Check if we have required capabilities for a benchmark run. + + Returns list of errors (empty if all good). 
+ """ + errors = [] + + if not self.has_hyperfine: + errors.append("hyperfine not found in PATH (required for benchmarking)") + + if instrumented: + if not self.has_flamegraph: + errors.append( + "flamegraph not found in PATH (required for --instrumented)" + ) + if not self.has_perf: + errors.append("perf not found in PATH (required for --instrumented)") + + return errors + + def check_for_build(self) -> list[str]: + """Check if we have required capabilities for building. + + Returns list of errors (empty if all good). + """ + errors = [] + + if not self.has_nix: + errors.append("nix not found in PATH (required for building)") + + return errors + + def get_warnings(self) -> list[str]: + """Get warnings about missing optional capabilities.""" + warnings = [] + + if not self.can_drop_caches: + warnings.append( + "drop-caches not available - cache won't be cleared between runs" + ) + + return warnings + + +def _check_executable(name: str) -> bool: + """Check if an executable is available in PATH.""" + return shutil.which(name) is not None + + +def _find_drop_caches() -> str | None: + """Find drop-caches executable.""" + for path in DROP_CACHES_PATHS: + if Path(path).exists() and os.access(path, os.X_OK): + return path + return None + + +def _is_nixos() -> bool: + """Check if we're running on NixOS.""" + return Path("/etc/NIXOS").exists() + + +def detect_capabilities() -> Capabilities: + """Auto-detect system capabilities.""" + drop_caches_path = _find_drop_caches() + + return Capabilities( + can_drop_caches=drop_caches_path is not None, + drop_caches_path=drop_caches_path, + has_hyperfine=_check_executable("hyperfine"), + has_flamegraph=_check_executable("flamegraph"), + has_perf=_check_executable("perf"), + has_nix=_check_executable("nix"), + cpu_count=os.cpu_count() or 1, + is_nixos=_is_nixos(), + is_ci=os.environ.get("CI", "").lower() in ("true", "1", "yes"), + ) diff --git a/bench/compare.py b/bench/compare.py new file mode 100644 index 000000000000..fac328841634 
--- /dev/null +++ b/bench/compare.py @@ -0,0 +1,180 @@ +"""Compare phase - compare benchmark results from multiple runs.""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass +from pathlib import Path + +logger = logging.getLogger(__name__) + + +@dataclass +class BenchmarkEntry: + """A single benchmark entry from results.json.""" + + command: str + mean: float + stddev: float | None + user: float + system: float + min: float + max: float + times: list[float] + + +@dataclass +class Comparison: + """Comparison of one entry against the baseline.""" + + name: str + mean: float + baseline_mean: float + speedup_percent: float + stddev: float | None + + +@dataclass +class CompareResult: + """Result of comparison.""" + + baseline: str + comparisons: list[Comparison] + + +class ComparePhase: + """Compare benchmark results from multiple results.json files.""" + + def run( + self, + results_files: list[Path], + baseline: str | None = None, + ) -> CompareResult: + """Compare benchmark results. 
+ + Args: + results_files: List of results.json files to compare + baseline: Name of the baseline entry (default: first entry) + + Returns: + CompareResult with comparison data + """ + if not results_files: + raise ValueError("At least one results file is required") + + # Load all entries from all files + all_entries: list[BenchmarkEntry] = [] + for results_file in results_files: + if not results_file.exists(): + raise FileNotFoundError(f"Results file not found: {results_file}") + + logger.info(f"Loading results from: {results_file}") + with open(results_file) as f: + data = json.load(f) + + entries = self._parse_results(data) + logger.info(f" Found {len(entries)} entries") + all_entries.extend(entries) + + if not all_entries: + raise ValueError("No benchmark entries found in results files") + + # Determine baseline + if baseline is None: + baseline = all_entries[0].command + logger.info(f"Using baseline: {baseline}") + + # Find baseline entry + baseline_entry = None + for entry in all_entries: + if entry.command == baseline: + baseline_entry = entry + break + + if baseline_entry is None: + available = [e.command for e in all_entries] + raise ValueError( + f"Baseline '{baseline}' not found. 
Available: {', '.join(available)}" + ) + + # Calculate comparisons + comparisons: list[Comparison] = [] + for entry in all_entries: + if entry.command == baseline: + continue + + speedup = self._calculate_speedup(baseline_entry.mean, entry.mean) + comparisons.append( + Comparison( + name=entry.command, + mean=entry.mean, + baseline_mean=baseline_entry.mean, + speedup_percent=speedup, + stddev=entry.stddev, + ) + ) + + # Log results + logger.info("Comparison results:") + logger.info(f" Baseline ({baseline}): {baseline_entry.mean:.3f}s") + for comp in comparisons: + sign = "+" if comp.speedup_percent > 0 else "" + logger.info( + f" {comp.name}: {comp.mean:.3f}s ({sign}{comp.speedup_percent:.1f}%)" + ) + + return CompareResult( + baseline=baseline, + comparisons=comparisons, + ) + + def _parse_results(self, data: dict) -> list[BenchmarkEntry]: + """Parse results from hyperfine JSON output.""" + entries = [] + + results = data.get("results", []) + for result in results: + entries.append( + BenchmarkEntry( + command=result.get("command", "unknown"), + mean=result.get("mean", 0), + stddev=result.get("stddev"), + user=result.get("user", 0), + system=result.get("system", 0), + min=result.get("min", 0), + max=result.get("max", 0), + times=result.get("times", []), + ) + ) + + return entries + + def _calculate_speedup(self, baseline_mean: float, other_mean: float) -> float: + """Calculate speedup percentage. 
+ + Positive = faster than baseline + Negative = slower than baseline + """ + if baseline_mean == 0: + return 0.0 + return round(((baseline_mean - other_mean) / baseline_mean) * 100, 1) + + def to_json(self, result: CompareResult) -> str: + """Convert comparison result to JSON.""" + return json.dumps( + { + "baseline": result.baseline, + "comparisons": [ + { + "name": c.name, + "mean": c.mean, + "baseline_mean": c.baseline_mean, + "speedup_percent": c.speedup_percent, + "stddev": c.stddev, + } + for c in result.comparisons + ], + }, + indent=2, + ) diff --git a/bench/config.py b/bench/config.py new file mode 100644 index 000000000000..cc54c47106f2 --- /dev/null +++ b/bench/config.py @@ -0,0 +1,230 @@ +"""Configuration management for benchcoin. + +Layered configuration (lowest to highest priority): +1. Built-in defaults +2. bench.toml config file +3. Environment variables (BENCH_*) +4. CLI arguments +""" + +from __future__ import annotations + +import os +import tomllib +from dataclasses import dataclass +from pathlib import Path +from typing import Any + + +# Built-in defaults +DEFAULTS = { + "chain": "main", + "dbcache": 450, + "stop_height": 855000, + "runs": 3, + "connect": "", # Empty = use public P2P network + "binaries_dir": "./binaries", + "output_dir": "./bench-output", +} + +# Profile overrides +PROFILES = { + "quick": { + "stop_height": 1500, + "runs": 1, + }, + "full": { + "stop_height": 855000, + "runs": 3, + }, + "ci": { + "stop_height": 855000, + "runs": 3, + "connect": "148.251.128.115:33333", + }, +} + +# Environment variable mapping +ENV_MAPPING = { + "BENCH_DATADIR": "datadir", + "BENCH_TMP_DATADIR": "tmp_datadir", + "BENCH_BINARIES_DIR": "binaries_dir", + "BENCH_OUTPUT_DIR": "output_dir", + "BENCH_STOP_HEIGHT": "stop_height", + "BENCH_DBCACHE": "dbcache", + "BENCH_CONNECT": "connect", + "BENCH_RUNS": "runs", + "BENCH_CHAIN": "chain", +} + + +@dataclass +class Config: + """Benchmark configuration.""" + + # Core benchmark settings + chain: str = 
"main" + dbcache: int = 450 + stop_height: int = 855000 + runs: int = 3 + connect: str = "" # Empty = use public P2P network + + # Paths + datadir: str | None = None + tmp_datadir: str | None = None + binaries_dir: str = "./binaries" + output_dir: str = "./bench-output" + + # Behavior flags + instrumented: bool = False + skip_existing: bool = False + no_cache_drop: bool = False + verbose: bool = False + dry_run: bool = False + + # Profile used (for reference) + profile: str = "full" + + def __post_init__(self) -> None: + # If tmp_datadir not set, derive from output_dir + if self.tmp_datadir is None: + self.tmp_datadir = str(Path(self.output_dir) / "tmp-datadir") + + # Instrumented mode forces runs=1 + if self.instrumented and self.runs != 1: + self.runs = 1 + + def validate(self) -> list[str]: + """Validate configuration, return list of errors.""" + errors = [] + + # datadir is optional (None = fresh sync) + if self.datadir is not None and not Path(self.datadir).exists(): + errors.append(f"datadir does not exist: {self.datadir}") + + if self.stop_height < 1: + errors.append("stop_height must be positive") + + if self.dbcache < 1: + errors.append("dbcache must be positive") + + if self.runs < 1: + errors.append("runs must be positive") + + if self.chain not in ("main", "testnet", "signet", "regtest"): + errors.append(f"invalid chain: {self.chain}") + + return errors + + +def load_toml(path: Path) -> tuple[dict[str, Any], dict[str, dict[str, Any]]]: + """Load configuration from TOML file. 
+ + Returns: + Tuple of (base_config, profiles_dict) + """ + if not path.exists(): + return {}, {} + + with open(path, "rb") as f: + data = tomllib.load(f) + + # Flatten structure: merge [defaults] and [paths] into top level + result = {} + if "defaults" in data: + result.update(data["defaults"]) + if "paths" in data: + result.update(data["paths"]) + + # Extract profiles + profiles = data.get("profiles", {}) + + return result, profiles + + +def load_env() -> dict[str, Any]: + """Load configuration from environment variables.""" + result = {} + + for env_var, config_key in ENV_MAPPING.items(): + value = os.environ.get(env_var) + if value is not None: + # Convert numeric values + if config_key in ("stop_height", "dbcache", "runs"): + try: + value = int(value) + except ValueError: + pass # Keep as string, will fail validation + result[config_key] = value + + return result + + +def apply_profile( + config: dict[str, Any], + profile_name: str, + toml_profiles: dict[str, dict[str, Any]] | None = None, +) -> dict[str, Any]: + """Apply a named profile to configuration. + + Args: + config: Base configuration dict + profile_name: Name of profile to apply + toml_profiles: Profiles loaded from TOML file (override built-in) + """ + result = config.copy() + result["profile"] = profile_name + + # Apply built-in profile first + if profile_name in PROFILES: + result.update(PROFILES[profile_name]) + + # Then apply TOML profile (overrides built-in) + if toml_profiles and profile_name in toml_profiles: + result.update(toml_profiles[profile_name]) + + return result + + +def build_config( + cli_args: dict[str, Any] | None = None, + config_file: Path | None = None, + profile: str = "full", +) -> Config: + """Build configuration from all sources. + + Priority (lowest to highest): + 1. Built-in defaults + 2. Config file (bench.toml) base settings + 3. Built-in profile overrides + 4. Config file profile overrides + 5. Environment variables + 6. 
CLI arguments + """ + # Start with defaults + config = DEFAULTS.copy() + + # Load config file + if config_file is None: + config_file = Path("bench.toml") + file_config, toml_profiles = load_toml(config_file) + config.update(file_config) + + # Apply profile (built-in first, then TOML overrides) + config = apply_profile(config, profile, toml_profiles) + + # Load environment variables + env_config = load_env() + config.update(env_config) + + # Apply CLI arguments (filter out None values) + if cli_args: + for key, value in cli_args.items(): + if value is not None: + config[key] = value + + # Build Config object (filter to only valid fields) + valid_fields = {f.name for f in Config.__dataclass_fields__.values()} + filtered = {k: v for k, v in config.items() if k in valid_fields} + + return Config(**filtered) diff --git a/bench/configs/nightly.toml b/bench/configs/nightly.toml new file mode 100644 index 000000000000..0780317c9022 --- /dev/null +++ b/bench/configs/nightly.toml @@ -0,0 +1,24 @@ +# Nightly benchmark configuration +# Clone benchcoin + use this config = reproduce the benchmark +# +# Usage: +# bench.py run --benchmark-config bench/configs/nightly.toml --matrix-entry 450 \ +# --datadir /data/pruned-840k --output-dir ./output \ +# master:/path/to/bitcoind + +[benchmark] +start_height = 840000 +runs = 2 + +[bitcoind] +stopatheight = 900000 +chain = "main" +connect = "148.251.128.115:33333" # accepts whitelisted ip addrs only +prune = 10000 +daemon = false +printtoconsole = false + +# Parameter matrix - each value runs separately +# Use --matrix-entry to select one (e.g., --matrix-entry 450) +[bitcoind.matrix] +dbcache = [450, 32000] diff --git a/bench/configs/pr.toml b/bench/configs/pr.toml new file mode 100644 index 000000000000..f154d1a7b468 --- /dev/null +++ b/bench/configs/pr.toml @@ -0,0 +1,29 @@ +# PR benchmark configuration (base vs head comparison) +# Clone benchcoin + use this config = reproduce the benchmark +# +# Usage: +# bench.py run 
--benchmark-config bench/configs/pr.toml --matrix-entry 450-false \ +# --datadir /data/pruned-840k --output-dir ./output \ +# base:/path/to/base/bitcoind head:/path/to/head/bitcoind + +[benchmark] +start_height = 840000 +runs = 3 + +[bitcoind] +stopatheight = 855000 +chain = "main" +connect = "148.251.128.115:33333" +prune = 10000 +daemon = false +printtoconsole = false + +# Parameter matrix - creates multiple benchmark configurations +# Matrix expands to: 450-false, 450-true, 32000-false, 32000-true +[bitcoind.matrix] +dbcache = [450, 32000] +instrumented = [false, true] + +# Debug flags enabled when instrumented = true +[bitcoind.instrumented] +debug = ["coindb", "leveldb", "bench", "validation"] diff --git a/bench/configs/test-signet.toml b/bench/configs/test-signet.toml new file mode 100644 index 000000000000..dc378fcf1d60 --- /dev/null +++ b/bench/configs/test-signet.toml @@ -0,0 +1,21 @@ +# Test benchmark configuration for signet +# Quick local testing without dedicated sync peer +# +# Usage: +# bench.py run --benchmark-config bench/configs/test-signet.toml --matrix-entry 450 \ +# --datadir /path/to/signet-datadir --output-dir ./output \ +# test:./binaries/test/bitcoind + +[benchmark] +start_height = 0 +runs = 1 + +[bitcoind] +stopatheight = 10000 +chain = "signet" +prune = 1000 +daemon = false +printtoconsole = false + +[bitcoind.matrix] +dbcache = [450] diff --git a/bench/machine.py b/bench/machine.py new file mode 100644 index 000000000000..3c16659a508f --- /dev/null +++ b/bench/machine.py @@ -0,0 +1,175 @@ +"""Machine specification detection for benchmark context.""" + +from __future__ import annotations + +import logging +import subprocess +from dataclasses import dataclass +from typing import Any + +logger = logging.getLogger(__name__) + + +@dataclass +class MachineSpecs: + """Machine hardware specifications.""" + + cpu_model: str + architecture: str + cpu_cores: int + disk_type: str + os_kernel: str + total_ram_gb: float + + def to_dict(self) -> 
dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "cpu_model": self.cpu_model, + "architecture": self.architecture, + "cpu_cores": self.cpu_cores, + "disk_type": self.disk_type, + "os_kernel": self.os_kernel, + "total_ram_gb": self.total_ram_gb, + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> MachineSpecs: + """Create from dictionary.""" + return cls( + cpu_model=data.get("cpu_model", "Unknown"), + architecture=data.get("architecture", "Unknown"), + cpu_cores=data.get("cpu_cores", 0), + disk_type=data.get("disk_type", "Unknown"), + os_kernel=data.get("os_kernel", "Unknown"), + total_ram_gb=data.get("total_ram_gb", 0.0), + ) + + +def _run_command(cmd: list[str]) -> str: + """Run a command and return stdout, or empty string on failure.""" + try: + result = subprocess.run(cmd, capture_output=True, text=True, timeout=5) + return result.stdout.strip() + except (subprocess.TimeoutExpired, FileNotFoundError, OSError) as e: + logger.debug(f"Command {cmd} failed: {e}") + return "" + + +def _get_cpu_info() -> tuple[str, str, int]: + """Get CPU model, architecture, and core count from lscpu.""" + output = _run_command(["lscpu"]) + + cpu_model = "Unknown" + architecture = "Unknown" + cpu_cores = 0 + + for line in output.split("\n"): + if ":" not in line: + continue + key, value = line.split(":", 1) + key = key.strip() + value = value.strip() + + if key == "Model name": + cpu_model = value + elif key == "Architecture": + architecture = value + elif key == "CPU(s)": + try: + cpu_cores = int(value) + except ValueError: + pass + + # Fallback for architecture + if architecture == "Unknown": + architecture = _run_command(["uname", "-m"]) or "Unknown" + + return cpu_model, architecture, cpu_cores + + +def _get_os_kernel() -> str: + """Get the OS kernel version from uname -r.""" + kernel = _run_command(["uname", "-r"]) + return kernel if kernel else "Unknown" + + +def _get_total_ram_gb() -> float: + """Get total RAM in GB from 
/proc/meminfo.""" + try: + with open("/proc/meminfo") as f: + for line in f: + if line.startswith("MemTotal:"): + # Format: "MemTotal: 16384000 kB" + parts = line.split() + if len(parts) >= 2: + kb = int(parts[1]) + return round(kb / (1024 * 1024), 1) # Convert kB to GB + except (OSError, ValueError) as e: + logger.debug(f"Failed to read /proc/meminfo: {e}") + return 0.0 + + +def _get_disk_type() -> str: + """Get the fastest disk type on the system. + + Priority: NVMe > SATA SSD > HDD + Uses lsblk to check ROTA (rotational) flag: 0 = SSD/NVMe, 1 = HDD + """ + output = _run_command(["lsblk", "-d", "-o", "NAME,ROTA,MODEL", "-n"]) + + has_nvme = False + has_ssd = False + has_hdd = False + + for line in output.split("\n"): + if not line.strip(): + continue + + parts = line.split() + if len(parts) < 2: + continue + + name = parts[0] + try: + rota = int(parts[1]) + except (ValueError, IndexError): + continue + + if name.startswith("nvme"): + has_nvme = True + elif rota == 0: + has_ssd = True + elif rota == 1: + has_hdd = True + + if has_nvme: + return "NVMe SSD" + elif has_ssd: + return "SATA SSD" + elif has_hdd: + return "HDD" + else: + return "Unknown" + + +def get_machine_specs() -> MachineSpecs: + """Detect and return current machine specifications.""" + cpu_model, architecture, cpu_cores = _get_cpu_info() + disk_type = _get_disk_type() + os_kernel = _get_os_kernel() + total_ram_gb = _get_total_ram_gb() + + specs = MachineSpecs( + cpu_model=cpu_model, + architecture=architecture, + cpu_cores=cpu_cores, + disk_type=disk_type, + os_kernel=os_kernel, + total_ram_gb=total_ram_gb, + ) + + logger.info( + f"Detected machine: {cpu_model} ({architecture}, {cpu_cores} cores, " + f"{total_ram_gb}GB RAM, {disk_type}, {os_kernel})" + ) + return specs diff --git a/bench/nightly.py b/bench/nightly.py new file mode 100644 index 000000000000..947076241af1 --- /dev/null +++ b/bench/nightly.py @@ -0,0 +1,456 @@ +"""Nightly benchmark history management and chart generation.""" + +from 
__future__ import annotations + +import json +import logging +from dataclasses import dataclass +from datetime import date +from pathlib import Path +from typing import Any + +logger = logging.getLogger(__name__) + +# HTML template for the nightly chart homepage +NIGHTLY_CHART_TEMPLATE = """ + + + Bitcoin Core Nightly IBD Benchmark + + + + + + +
+

Bitcoin Core Nightly IBD Benchmark

+

+ IBD from a single networked peer from block 840,000 to 900,000 on a Hetzner AX52 +

+
+
+
+

+ View PR benchmark results +

+
+ + +""" + + +@dataclass +class NightlyResult: + """A single nightly benchmark result.""" + + date: str + commit: str + config: str # 'default' or 'large' + dbcache: int + mean: float + stddev: float + runs: int + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "date": self.date, + "commit": self.commit, + "config": self.config, + "dbcache": self.dbcache, + "mean": self.mean, + "stddev": self.stddev, + "runs": self.runs, + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> NightlyResult: + """Create from dictionary.""" + return cls( + date=data["date"], + commit=data["commit"], + config=data["config"], + dbcache=data["dbcache"], + mean=data["mean"], + stddev=data["stddev"], + runs=data["runs"], + ) + + +class NightlyHistory: + """Manages the nightly benchmark history stored in JSON.""" + + def __init__(self, history_file: Path): + self.history_file = history_file + self.results: list[NightlyResult] = [] + self.machine: dict | None = None + self.config: dict | None = None + self._load() + + def _load(self) -> None: + """Load history from JSON file.""" + if self.history_file.exists(): + with open(self.history_file) as f: + data = json.load(f) + self.results = [NightlyResult.from_dict(r) for r in data.get("results", [])] + self.machine = data.get("machine") + self.config = data.get("config") + logger.info(f"Loaded {len(self.results)} results from {self.history_file}") + else: + self.results = [] + self.machine = None + self.config = None + logger.info(f"No existing history at {self.history_file}") + + def save(self) -> None: + """Save history to JSON file.""" + self.history_file.parent.mkdir(parents=True, exist_ok=True) + data: dict = {"results": [r.to_dict() for r in self.results]} + if self.config: + data["config"] = self.config + if self.machine: + data["machine"] = self.machine + with open(self.history_file, "w") as f: + json.dump(data, f, indent=2) + logger.info(f"Saved {len(self.results)} 
results to {self.history_file}") + + def set_machine(self, machine_specs: dict) -> None: + """Set or update the machine specifications.""" + self.machine = machine_specs + logger.info(f"Set machine specs: {machine_specs.get('cpu_model', 'Unknown')}") + + def set_config(self, benchmark_config: dict) -> None: + """Set or update the benchmark configuration.""" + self.config = benchmark_config + logger.info( + f"Set benchmark config: {benchmark_config.get('start_height', '?')} -> " + f"{benchmark_config.get('stop_height', '?')}" + ) + + def append(self, result: NightlyResult) -> None: + """Append a new result to history.""" + # Check for duplicate (same date, commit, config) + for existing in self.results: + if ( + existing.date == result.date + and existing.commit == result.commit + and existing.config == result.config + ): + logger.warning( + f"Duplicate result for {result.date} {result.commit[:8]} {result.config}, replacing" + ) + self.results.remove(existing) + break + + self.results.append(result) + # Sort by date, then config + self.results.sort(key=lambda r: (r.date, r.config)) + logger.info( + f"Appended result: {result.date} {result.commit[:8]} {result.config} {result.mean:.1f}s" + ) + + def append_from_results_json( + self, + results_file: Path, + commit: str, + config: str, + dbcache: int, + date_str: str | None = None, + ) -> None: + """Append result from a hyperfine results.json file. 
+ + Args: + results_file: Path to hyperfine results.json + commit: Git commit hash + config: Config name ('default' or 'large') + dbcache: DB cache size in MB + date_str: Date string (YYYY-MM-DD), defaults to today + """ + if not results_file.exists(): + raise FileNotFoundError(f"Results file not found: {results_file}") + + with open(results_file) as f: + data = json.load(f) + + # Hyperfine output has a "results" array with one entry per command + # For nightly, we only have one command (master) + results = data.get("results", []) + if not results: + raise ValueError(f"No results found in {results_file}") + + # Use the first (and should be only) result + result_data = results[0] + mean = result_data.get("mean", 0) + stddev = result_data.get("stddev", 0) + runs = len(result_data.get("times", [])) + + if date_str is None: + date_str = date.today().isoformat() + + result = NightlyResult( + date=date_str, + commit=commit, + config=config, + dbcache=dbcache, + mean=mean, + stddev=stddev if stddev else 0, + runs=runs, + ) + self.append(result) + + +def generate_nightly_chart(history: NightlyHistory, output_file: Path) -> None: + """Generate the nightly chart HTML page. 
+ + Args: + history: NightlyHistory instance with loaded results + output_file: Path to write index.html + """ + # Convert results to JSON for embedding in HTML + chart_data = json.dumps([r.to_dict() for r in history.results]) + + html = NIGHTLY_CHART_TEMPLATE.format(chart_data=chart_data) + + output_file.parent.mkdir(parents=True, exist_ok=True) + output_file.write_text(html) + logger.info(f"Generated nightly chart: {output_file}") + + +class NightlyPhase: + """CLI interface for nightly benchmark operations.""" + + def __init__(self, history_file: Path): + self.history_file = history_file + + def append( + self, + results_file: Path, + commit: str, + config: str, + dbcache: int, + date_str: str | None = None, + capture_machine: bool = False, + benchmark_config_file: Path | None = None, + ) -> None: + """Append a result from hyperfine results.json to history. + + Args: + results_file: Path to hyperfine results.json + commit: Git commit hash + config: Config name ('default' or 'large') + dbcache: DB cache size in MB + date_str: Date string (YYYY-MM-DD), defaults to today + capture_machine: If True, detect and store machine specs + benchmark_config_file: If provided, load and store benchmark config + """ + history = NightlyHistory(self.history_file) + + if capture_machine: + from bench.machine import get_machine_specs + + specs = get_machine_specs() + history.set_machine(specs.to_dict()) + + if benchmark_config_file: + from bench.benchmark_config import BenchmarkConfig + + benchmark_config = BenchmarkConfig.from_toml(benchmark_config_file) + history.set_config(benchmark_config.to_dict()) + + history.append_from_results_json( + results_file, commit, config, dbcache, date_str + ) + history.save() + + def chart(self, output_file: Path) -> None: + """Generate the nightly chart HTML. 
+ + Args: + output_file: Path to write index.html + """ + history = NightlyHistory(self.history_file) + generate_nightly_chart(history, output_file) diff --git a/bench/patchelf.py b/bench/patchelf.py new file mode 100644 index 000000000000..6da1e00867cf --- /dev/null +++ b/bench/patchelf.py @@ -0,0 +1,135 @@ +"""Patchelf utilities for fixing guix-built binaries on NixOS.""" + +from __future__ import annotations + +import logging +import os +import subprocess +from pathlib import Path + +logger = logging.getLogger(__name__) + + +def get_nix_interpreter() -> str | None: + """Get the path to the nix store's dynamic linker. + + Returns None if not on NixOS or can't find it. + """ + # Check if we're on NixOS + if not Path("/etc/NIXOS").exists(): + return None + + # Find the interpreter from the current glibc + # We can get this by checking what the current shell uses + try: + result = subprocess.run( + ["patchelf", "--print-interpreter", "/bin/sh"], + capture_output=True, + text=True, + ) + if result.returncode == 0: + interp = result.stdout.strip() + if interp and Path(interp).exists(): + return interp + except FileNotFoundError: + pass + + return None + + +def get_binary_interpreter(binary: Path) -> str | None: + """Get the interpreter (dynamic linker) of a binary.""" + try: + result = subprocess.run( + ["patchelf", "--print-interpreter", str(binary)], + capture_output=True, + text=True, + ) + if result.returncode == 0: + return result.stdout.strip() + except FileNotFoundError: + logger.debug("patchelf not found") + return None + + +def needs_patching(binary: Path) -> bool: + """Check if a binary needs to be patched for NixOS. 
+ + Returns True if: + - We're on NixOS + - The binary has a non-nix interpreter (e.g., /lib64/ld-linux-x86-64.so.2) + """ + nix_interp = get_nix_interpreter() + if not nix_interp: + # Not on NixOS, no patching needed + return False + + binary_interp = get_binary_interpreter(binary) + if not binary_interp: + # Can't determine interpreter, assume no patching needed + return False + + # Check if the binary's interpreter is already in the nix store + if binary_interp.startswith("/nix/store/"): + return False + + # Binary uses a non-nix interpreter (e.g., /lib64/...) + return True + + +def patch_binary(binary: Path) -> bool: + """Patch a binary to use the nix store's dynamic linker. + + Returns True if patching was successful or not needed. + """ + if not needs_patching(binary): + logger.debug(f"Binary {binary} does not need patching") + return True + + nix_interp = get_nix_interpreter() + if not nix_interp: + logger.warning("Cannot patch binary: unable to find nix interpreter") + return False + + original_interp = get_binary_interpreter(binary) + logger.info(f"Patching binary: {binary}") + logger.info(f" Original interpreter: {original_interp}") + logger.info(f" New interpreter: {nix_interp}") + + # Make sure binary is writable + try: + os.chmod(binary, 0o755) + except OSError as e: + logger.warning(f"Could not make binary writable: {e}") + + try: + result = subprocess.run( + ["patchelf", "--set-interpreter", nix_interp, str(binary)], + capture_output=True, + text=True, + ) + if result.returncode != 0: + logger.error(f"patchelf failed: {result.stderr}") + return False + logger.info(" Patching successful") + return True + except FileNotFoundError: + logger.error("patchelf not found - install it or use nix develop") + return False + + +def ensure_binary_runnable(binary: Path) -> bool: + """Ensure a binary can run on this system. + + Patches the binary if necessary (on NixOS with non-nix binaries). + Returns True if the binary should be runnable. 
+ """ + if not binary.exists(): + logger.error(f"Binary not found: {binary}") + return False + + # Check if patching is needed and do it + if needs_patching(binary): + return patch_binary(binary) + + return True diff --git a/bench/report.py b/bench/report.py new file mode 100644 index 000000000000..0a3b07348688 --- /dev/null +++ b/bench/report.py @@ -0,0 +1,659 @@ +"""Report phase - generate HTML reports from benchmark results. + +Ported from the JavaScript logic in .github/workflows/publish-results.yml. +""" + +from __future__ import annotations + +import json +import logging +import re +import shutil +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +logger = logging.getLogger(__name__) + +# HTML template for individual run report +RUN_REPORT_TEMPLATE = """ + + + Benchmark Results + + + +
+

Benchmark Results

+
+

{title}

+ + +

Run Data

+
+ + + + + + + + + + + + + {run_data_rows} + +
NetworkCommandMean (s)Std DevUser (s)System (s)
+
+ + +

Speedup Summary

+
+ + + + + + + + + {speedup_rows} + +
NetworkSpeedup (%)
+
+ + + {graphs_section} +
+
+ +""" + +# HTML template for main index +INDEX_TEMPLATE = """ + + + Bitcoin Benchmark Results + + + +
+

Bitcoin Benchmark Results

+
+

Available Results

+
    + {run_list} +
+
+
+ +""" + + +@dataclass +class BenchmarkRun: + """Parsed benchmark run data.""" + + network: str + command: str + mean: float + stddev: float | None + user: float + system: float + parameters: dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ReportResult: + """Result of report generation.""" + + output_dir: Path + index_file: Path + speedups: dict[str, float] + + +class ReportGenerator: + """Generate HTML reports from benchmark results.""" + + def __init__( + self, repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin" + ): + self.repo_url = repo_url + + def generate_multi_network( + self, + network_dirs: dict[str, Path], + output_dir: Path, + title: str = "Benchmark Results", + pr_number: str | None = None, + run_id: str | None = None, + ) -> ReportResult: + """Generate HTML report from multiple network benchmark results. + + Args: + network_dirs: Dict mapping network name to directory containing results.json + output_dir: Where to write the HTML report + title: Title for the report + pr_number: PR number (for CI reports) + run_id: Run ID (for CI reports) + + Returns: + ReportResult with paths and speedup data + """ + output_dir.mkdir(parents=True, exist_ok=True) + + # Combine results from all networks + all_runs: list[BenchmarkRun] = [] + for network, input_dir in network_dirs.items(): + results_file = input_dir / "results.json" + if not results_file.exists(): + logger.warning( + f"results.json not found in {input_dir} for network {network}" + ) + continue + + with open(results_file) as f: + data = json.load(f) + + # Parse and add network to each run + for result in data.get("results", []): + all_runs.append( + BenchmarkRun( + network=network, + command=result.get("command", ""), + mean=result.get("mean", 0), + stddev=result.get("stddev"), + user=result.get("user", 0), + system=result.get("system", 0), + parameters=result.get("parameters", {}), + ) + ) + + # Copy artifacts from this network + self._copy_network_artifacts(network, 
input_dir, output_dir) + + if not all_runs: + raise ValueError("No benchmark results found in any network directory") + + # Calculate speedups per network + speedups = self._calculate_speedups_per_network(all_runs) + + # Build title with PR/run info if provided + full_title = title + if pr_number and run_id: + full_title = f"PR #{pr_number} - Run {run_id}" + + # Generate HTML + html = self._generate_html( + all_runs, speedups, full_title, output_dir, output_dir + ) + + # Write report + index_file = output_dir / "index.html" + index_file.write_text(html) + logger.info(f"Generated report: {index_file}") + + # Write combined results.json + combined_results = { + "results": [ + { + "network": run.network, + "command": run.command, + "mean": run.mean, + "stddev": run.stddev, + "user": run.user, + "system": run.system, + } + for run in all_runs + ], + "speedups": speedups, + } + results_file = output_dir / "results.json" + results_file.write_text(json.dumps(combined_results, indent=2)) + + return ReportResult( + output_dir=output_dir, + index_file=index_file, + speedups=speedups, + ) + + def generate( + self, + input_dir: Path, + output_dir: Path, + title: str = "Benchmark Results", + ) -> ReportResult: + """Generate HTML report from benchmark artifacts. 
+ + Args: + input_dir: Directory containing results.json and artifacts + output_dir: Where to write the HTML report + title: Title for the report + + Returns: + ReportResult with paths and speedup data + """ + output_dir.mkdir(parents=True, exist_ok=True) + + # Load results.json + results_file = input_dir / "results.json" + if not results_file.exists(): + raise FileNotFoundError(f"results.json not found in {input_dir}") + + with open(results_file) as f: + data = json.load(f) + + # Parse results + runs = self._parse_results(data) + + # Calculate speedups + speedups = self._calculate_speedups(runs) + + # Generate HTML + html = self._generate_html(runs, speedups, title, input_dir, output_dir) + + # Write report + index_file = output_dir / "index.html" + index_file.write_text(html) + logger.info(f"Generated report: {index_file}") + + # Copy artifacts (flamegraphs, plots) + self._copy_artifacts(input_dir, output_dir) + + return ReportResult( + output_dir=output_dir, + index_file=index_file, + speedups=speedups, + ) + + def generate_index( + self, + results_dir: Path, + output_file: Path, + ) -> None: + """Generate main index.html listing all available results. + + Args: + results_dir: Directory containing pr-* subdirectories + output_file: Where to write index.html + """ + runs = [] + + if results_dir.exists(): + for pr_dir in sorted(results_dir.iterdir()): + if pr_dir.is_dir() and pr_dir.name.startswith("pr-"): + pr_num = pr_dir.name.replace("pr-", "") + pr_runs = [] + for run_dir in sorted(pr_dir.iterdir()): + if run_dir.is_dir(): + pr_runs.append(run_dir.name) + if pr_runs: + runs.append((pr_num, pr_runs)) + + run_list_html = "" + for pr_num, pr_runs in runs: + run_links = "\n".join( + f'
  • Run {run}
  • ' + for run in pr_runs + ) + run_list_html += f""" +
  • PR #{pr_num} +
      + {run_links} +
    +
  • + """ + + html = INDEX_TEMPLATE.format(run_list=run_list_html) + output_file.write_text(html) + logger.info(f"Generated index: {output_file}") + + def _parse_results(self, data: dict) -> list[BenchmarkRun]: + """Parse results from hyperfine JSON output.""" + runs = [] + + # Handle both direct hyperfine output and combined results format + results = data.get("results", []) + + for result in results: + runs.append( + BenchmarkRun( + network=result.get("network", "default"), + command=result.get("command", ""), + mean=result.get("mean", 0), + stddev=result.get("stddev"), + user=result.get("user", 0), + system=result.get("system", 0), + parameters=result.get("parameters", {}), + ) + ) + + return runs + + def _calculate_speedups(self, runs: list[BenchmarkRun]) -> dict[str, float]: + """Calculate speedup percentages. + + Uses the first entry as baseline and compares all others against it. + Returns a dict mapping command name to speedup percentage. + """ + speedups = {} + + if len(runs) < 2: + return speedups + + # Use first run as baseline + baseline = runs[0] + baseline_mean = baseline.mean + + if baseline_mean <= 0: + return speedups + + # Calculate speedup for each other run + for run in runs[1:]: + speedup = ((baseline_mean - run.mean) / baseline_mean) * 100 + # Use command name as key, extracting just the name part + name = run.command + speedups[name] = round(speedup, 1) + + return speedups + + def _calculate_speedups_per_network( + self, runs: list[BenchmarkRun] + ) -> dict[str, float]: + """Calculate speedup percentages per network. + + For each network, uses 'base' as baseline and calculates speedup for 'head'. + Returns a dict mapping network name to speedup percentage. 
+ """ + speedups = {} + + # Group runs by network + networks: dict[str, list[BenchmarkRun]] = {} + for run in runs: + if run.network not in networks: + networks[run.network] = [] + networks[run.network].append(run) + + # Calculate speedup for each network + for network, network_runs in networks.items(): + base_mean = None + head_mean = None + + for run in network_runs: + if run.command == "base": + base_mean = run.mean + elif run.command == "head": + head_mean = run.mean + + if base_mean and head_mean and base_mean > 0: + speedup = ((base_mean - head_mean) / base_mean) * 100 + speedups[network] = round(speedup, 1) + + return speedups + + def _copy_network_artifacts( + self, network: str, input_dir: Path, output_dir: Path + ) -> None: + """Copy artifacts from a network directory with network prefix.""" + # Copy flamegraphs with network prefix + for svg in input_dir.glob("*-flamegraph.svg"): + dest = output_dir / f"{network}-{svg.name}" + shutil.copy2(svg, dest) + logger.debug(f"Copied {svg.name} as {dest.name}") + + # Copy plots directory with network prefix + plots_dir = input_dir / "plots" + if plots_dir.exists(): + dest_plots = output_dir / f"{network}-plots" + if dest_plots.exists(): + shutil.rmtree(dest_plots) + shutil.copytree(plots_dir, dest_plots) + logger.debug(f"Copied plots to {dest_plots}") + + def _generate_html( + self, + runs: list[BenchmarkRun], + speedups: dict[str, float], + title: str, + input_dir: Path, + output_dir: Path, + ) -> str: + """Generate the HTML report.""" + # Sort runs by network then by command (base first) + sorted_runs = sorted( + runs, + key=lambda r: (r.network, 0 if "base" in r.command.lower() else 1), + ) + + # Generate run data rows + run_data_rows = "" + for run in sorted_runs: + # Create commit link if there's a commit hash in the command + command_html = self._linkify_commit(run.command) + + stddev_str = f"{run.stddev:.3f}" if run.stddev else "N/A" + + run_data_rows += f""" + + {run.network} + {command_html} + 
{run.mean:.3f} + {stddev_str} + {run.user:.3f} + {run.system:.3f} + + """ + + # Generate speedup rows + speedup_rows = "" + for name, speedup in speedups.items(): + # Skip instrumented runs in speedup summary + if name.lower().endswith("-instrumented"): + continue + + color_class = "" + if speedup > 0: + color_class = "text-green-600" + elif speedup < 0: + color_class = "text-red-600" + + sign = "+" if speedup > 0 else "" + speedup_rows += f""" + + {name} + {sign}{speedup}% + + """ + + # Generate graphs section + graphs_section = self._generate_graphs_section(runs, input_dir, output_dir) + + return RUN_REPORT_TEMPLATE.format( + title=title, + run_data_rows=run_data_rows, + speedup_rows=speedup_rows, + graphs_section=graphs_section, + ) + + def _linkify_commit(self, command: str) -> str: + """Convert commit hashes in command to links.""" + + def replace_commit(match): + commit = match.group(1) + short_commit = commit[:8] if len(commit) > 8 else commit + return f'({short_commit})' + + return re.sub(r"\(([a-f0-9]{7,40})\)", replace_commit, command) + + def _generate_graphs_section( + self, + runs: list[BenchmarkRun], + input_dir: Path, + output_dir: Path, + ) -> str: + """Generate the flamegraphs and plots section.""" + graphs_html = "" + + for run in runs: + # Use the command/name directly (e.g., "base", "head") + name = run.command + network = run.network + + # Check for flamegraph - try both with and without network prefix + # Network-prefixed: {network}-{name}-flamegraph.svg (for multi-network reports) + # Non-prefixed: {name}-flamegraph.svg (for single-network reports) + flamegraph_name = None + flamegraph_path = None + + network_prefixed = f"{network}-{name}-flamegraph.svg" + non_prefixed = f"{name}-flamegraph.svg" + + if (output_dir / network_prefixed).exists(): + flamegraph_name = network_prefixed + flamegraph_path = output_dir / network_prefixed + elif (input_dir / non_prefixed).exists(): + flamegraph_name = non_prefixed + flamegraph_path = input_dir / 
non_prefixed + + # Check for plots - try both network-prefixed and non-prefixed directories + plot_files = [] + plots_dir = None + + network_plots_dir = output_dir / f"{network}-plots" + regular_plots_dir = input_dir / "plots" + + if network_plots_dir.exists(): + plots_dir = network_plots_dir + plot_files = [ + p.name + for p in plots_dir.iterdir() + if p.name.startswith(f"{name}-") and p.suffix == ".png" + ] + elif regular_plots_dir.exists(): + plots_dir = regular_plots_dir + plot_files = [ + p.name + for p in plots_dir.iterdir() + if p.name.startswith(f"{name}-") and p.suffix == ".png" + ] + + if not flamegraph_path and not plot_files: + continue + + # Build display label + display_label = f"{network} - {name}" if network != "default" else name + + graphs_html += f""" +
    +

    {display_label}

    + """ + + if flamegraph_path: + graphs_html += f""" + + """ + + if plot_files and plots_dir: + # Determine the relative path for plots + plots_rel_path = plots_dir.name + for plot in sorted(plot_files): + graphs_html += f""" + + {plot} + + """ + + graphs_html += "
    " + + if graphs_html: + return f""" +

    Flamegraphs and Plots

    + {graphs_html} + """ + + return "" + + def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: + """Copy flamegraphs and plots to output directory.""" + # Skip if input and output are the same directory + if input_dir.resolve() == output_dir.resolve(): + logger.debug("Input and output are the same directory, skipping copy") + return + + # Copy flamegraphs + for svg in input_dir.glob("*-flamegraph.svg"): + dest = output_dir / svg.name + shutil.copy2(svg, dest) + logger.debug(f"Copied {svg.name}") + + # Copy plots directory + plots_dir = input_dir / "plots" + if plots_dir.exists(): + dest_plots = output_dir / "plots" + if dest_plots.exists(): + shutil.rmtree(dest_plots) + shutil.copytree(plots_dir, dest_plots) + logger.debug("Copied plots directory") + + +class ReportPhase: + """Generate reports from benchmark results.""" + + def __init__( + self, repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin" + ): + self.generator = ReportGenerator(repo_url) + + def run( + self, + input_dir: Path, + output_dir: Path, + title: str = "Benchmark Results", + ) -> ReportResult: + """Generate report from benchmark artifacts. + + Args: + input_dir: Directory containing results.json and artifacts + output_dir: Where to write the HTML report + title: Title for the report + + Returns: + ReportResult with paths and speedup data + """ + return self.generator.generate(input_dir, output_dir, title) + + def run_multi_network( + self, + network_dirs: dict[str, Path], + output_dir: Path, + title: str = "Benchmark Results", + pr_number: str | None = None, + run_id: str | None = None, + ) -> ReportResult: + """Generate report from multiple network benchmark results. 
+ + Args: + network_dirs: Dict mapping network name to directory containing results.json + output_dir: Where to write the HTML report + title: Title for the report + pr_number: PR number (for CI reports) + run_id: Run ID (for CI reports) + + Returns: + ReportResult with paths and speedup data + """ + return self.generator.generate_multi_network( + network_dirs, output_dir, title, pr_number, run_id + ) + + def update_index(self, results_dir: Path, output_file: Path) -> None: + """Update the main index.html listing all results. + + Args: + results_dir: Directory containing pr-* subdirectories + output_file: Where to write index.html + """ + self.generator.generate_index(results_dir, output_file) diff --git a/bench/utils.py b/bench/utils.py new file mode 100644 index 000000000000..df454cf0644e --- /dev/null +++ b/bench/utils.py @@ -0,0 +1,105 @@ +"""Utility functions for git operations.""" + +from __future__ import annotations + +import logging +import subprocess +from pathlib import Path + +logger = logging.getLogger(__name__) + + +class GitState: + """Saved git state for restoration after operations.""" + + def __init__(self, repo_path: Path | None = None): + self.repo_path = repo_path or Path.cwd() + self.original_branch: str | None = None + self.original_commit: str | None = None + self.was_detached: bool = False + + def save(self) -> None: + """Save current git state.""" + # Check if we're on a branch or detached HEAD + result = subprocess.run( + ["git", "symbolic-ref", "--short", "HEAD"], + capture_output=True, + text=True, + cwd=self.repo_path, + ) + + if result.returncode == 0: + self.original_branch = result.stdout.strip() + self.was_detached = False + else: + # Detached HEAD - save commit hash + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + check=True, + cwd=self.repo_path, + ) + self.original_commit = result.stdout.strip() + self.was_detached = True + + logger.debug( + f"Saved git state: 
branch={self.original_branch}, " + f"commit={self.original_commit}, detached={self.was_detached}" + ) + + def restore(self) -> None: + """Restore saved git state.""" + if self.original_branch: + logger.debug(f"Restoring branch: {self.original_branch}") + subprocess.run( + ["git", "checkout", self.original_branch], + check=True, + cwd=self.repo_path, + ) + elif self.original_commit: + logger.debug(f"Restoring detached HEAD: {self.original_commit}") + subprocess.run( + ["git", "checkout", self.original_commit], + check=True, + cwd=self.repo_path, + ) + + +class GitError(Exception): + """Git operation failed.""" + + pass + + +def git_checkout(commit: str, repo_path: Path | None = None) -> None: + """Checkout a specific commit.""" + repo_path = repo_path or Path.cwd() + logger.info(f"Checking out {commit[:12]}") + + result = subprocess.run( + ["git", "checkout", commit], + cwd=repo_path, + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise GitError(f"Failed to checkout {commit}: {result.stderr}") + + +def git_rev_parse(ref: str, repo_path: Path | None = None) -> str: + """Resolve a git reference to a full commit hash.""" + repo_path = repo_path or Path.cwd() + + result = subprocess.run( + ["git", "rev-parse", ref], + cwd=repo_path, + capture_output=True, + text=True, + ) + + if result.returncode != 0: + raise GitError(f"Failed to resolve {ref}: {result.stderr}") + + return result.stdout.strip() diff --git a/doc/benchcoin.md b/doc/benchcoin.md new file mode 100644 index 000000000000..d16ebaac8e89 --- /dev/null +++ b/doc/benchcoin.md @@ -0,0 +1,181 @@ +# benchcoin + +A Bitcoin Core benchmarking fork with automated IBD performance tracking. + +## Overview + +This repository is a fork of Bitcoin Core that performs automated IBD (Initial Block Download) benchmarking. It measures and compares the performance impact of changes to Bitcoin Core's codebase using reproducible, long-running benchmarks. 
+
+**Live Results:** [bitcoin-dev-tools.github.io/benchcoin](https://bitcoin-dev-tools.github.io/benchcoin)
+
+## Features
+
+- **Nightly Benchmarks** - Daily performance tracking of master branch
+- **PR Benchmarks** - Automated comparison of base vs head on pull requests
+- **Multiple Configurations:**
+  - Default cache (450 MB dbcache)
+  - Large cache (32 GB dbcache)
+  - Instrumented mode (flamegraphs + debug logging)
+- **Performance Visualizations:**
+  - Interactive Plotly charts for nightly trends
+  - CPU flamegraphs with Bitcoin-specific coloring
+  - Time series plots (block height, cache size, tx count, LevelDB metrics)
+- **Reproducible Configs** - TOML config files capture all benchmark parameters
+
+## Example Flamegraph
+
+![Example Flamegraph](../doc/flamegraph.svg)
+
+## How to Use
+
+### Benchmark a PR
+
+1. Open a Pull Request against **this repo** (not bitcoin/bitcoin)
+2. Wait for the benchmark workflow to complete
+3. Results are posted as a PR comment with link to detailed report
+
+### Benchmark an Existing bitcoin/bitcoin PR
+
+```bash
+# Requires 'just' (https://github.com/casey/just)
+just pick-pr <pr-number>  # Cherry-pick commits from bitcoin/bitcoin PR
+git push origin HEAD
+# Open PR against this repo
+```
+
+## Benchmark Configurations
+
+All benchmark parameters are defined in config files at `bench/configs/`:
+
+| Config | Matrix Entries | Runs | Use Case |
+|--------|---------------|------|----------|
+| `nightly.toml` | 450, 32000 | 2 | Nightly tracking |
+| `pr.toml` | 450-false, 450-true, 32000-false, 32000-true | 3 | PR comparison |
+
+Matrix entries are generated from `[bitcoind.matrix]` values (e.g., `dbcache = [450, 32000]`).
+Both configs benchmark blocks 840,000 → 855,000 from a dedicated sync peer.
+ +## Benchmark Outputs + +### Nightly (Homepage) + +- Interactive chart showing sync time trends over time +- Machine specs and config stored with results +- Accessible at the repository's GitHub Pages root + +### PR Reports + +- Timing comparison (mean, stddev, speedup %) +- CPU flamegraphs (instrumented mode) +- Performance plots (instrumented mode) +- Accessible at `/results/pr-N/run-id/` + +## Local Development + +### Prerequisites + +- [Nix](https://nixos.org/download/) with flakes enabled +- Blockchain datadir snapshot (pruned at block 840,000) + +### Quick Start + +```bash +# Enter nix environment +nix develop + +# Build binaries +python3 bench.py build HEAD~1:base HEAD:head + +# Run benchmark with config +python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-false \ + --datadir /path/to/pruned-840k \ + base:./binaries/base/bitcoind \ + head:./binaries/head/bitcoind +``` + +### Just Recipes + +```bash +just test-uninstrumented HEAD~1 HEAD /path/to/datadir # Quick smoke test +just test-instrumented HEAD~1 HEAD /path/to/datadir # With flamegraphs +just build HEAD~1:base HEAD:head # Build only +just pick-pr 12345 # Cherry-pick PR +``` + +## Technical Details + +### Tools + +- [Hyperfine](https://github.com/sharkdp/hyperfine) - Benchmark timing +- [Flamegraph](https://github.com/willcl-ark/flamegraph) - CPU profiling (Bitcoin fork with custom palette) +- [Plotly.js](https://plotly.com/javascript/) - Interactive charts +- [matplotlib](https://matplotlib.org/) - Performance plots + +### CI Infrastructure + +**Runner:** Self-hosted on Hetzner AX52 (16 cores, NixOS) +- 1 core for system +- 1 core for perf/flamegraph +- 14 cores for bitcoind + +**Sync Peer:** Dedicated Hetzner VPS serving blocks over network to exercise full IBD codepaths. 
+
+Configuration repos:
+- Runner: [nix-github-runner](https://github.com/bitcoin-dev-tools/nix-github-runner)
+- Seed: [nix-seed-node](https://github.com/bitcoin-dev-tools/nix-seed-node)
+
+### Workflow Overview
+
+```
+Daily 5:00 AM GMT
+  │
+  ▼
+  rebase.yml (rebase on upstream)
+  │
+  ▼
+  nightly-benchmark.yml
+  │
+  ├─► Build master
+  ├─► Benchmark (default + large configs)
+  ├─► Append to nightly-history.json
+  └─► Generate homepage chart
+
+PR opened
+  │
+  ▼
+  benchmark.yml
+  │
+  ├─► Build base + head
+  └─► Benchmark (4 matrix configs)
+        │
+        ▼
+  publish-results.yml
+  │
+  ├─► Generate HTML report
+  ├─► Commit to gh-pages
+  └─► Post PR comment
+```
+
+### Results Storage
+
+Results are stored on the `gh-pages` branch:
+
+```
+/
+├── index.html             # Nightly chart (homepage)
+├── nightly-history.json   # Historical data + machine specs + config
+└── results/
+    ├── index.html         # PR results index
+    └── pr-<N>/<run-id>/   # Individual PR reports
+```
+
+The `nightly-history.json` captures:
+- Benchmark config (heights, peer, dbcache values, command template)
+- Machine specs (CPU, cores, RAM, disk type, kernel)
+- Results (date, commit, mean, stddev, runs)
+
+## License
+
+This project is licensed under the same terms as Bitcoin Core - see the [COPYING](../COPYING) file for details.
diff --git a/doc/flamegraph.svg b/doc/flamegraph.svg new file mode 100644 index 000000000000..77f05068edd1 --- /dev/null +++ b/doc/flamegraph.svg @@ -0,0 +1,491 @@ +bitcoind assumeutxo IBD@head Reset ZoomSearch [unknown] (930,216,305 samples, 0.03%)libc.so.6::__GI___libc_open (1,277,437,934 samples, 0.04%)[unknown] (1,277,437,934 samples, 0.04%)[unknown] (1,121,698,471 samples, 0.03%)[unknown] (1,121,698,471 samples, 0.03%)[unknown] (1,121,698,471 samples, 0.03%)[unknown] (808,723,138 samples, 0.02%)[unknown] (705,370,773 samples, 0.02%)[unknown] (654,247,113 samples, 0.02%)[unknown] (601,840,190 samples, 0.02%)[unknown] (412,286,776 samples, 0.01%)libc.so.6::__lll_lock_wait_private (3,169,140,832 samples, 0.09%)[unknown] (3,068,852,192 samples, 0.09%)[unknown] (2,912,247,498 samples, 0.08%)[unknown] (2,859,869,350 samples, 0.08%)[unknown] (2,547,374,665 samples, 0.07%)[unknown] (2,442,338,234 samples, 0.07%)[unknown] (2,018,530,007 samples, 0.06%)[unknown] (1,768,059,272 samples, 0.05%)[unknown] (1,360,516,543 samples, 0.04%)[unknown] (941,780,033 samples, 0.03%)[unknown] (732,126,125 samples, 0.02%)[unknown] (367,091,733 samples, 0.01%)libc.so.6::__lll_lock_wake_private (53,149,822,463 samples, 1.49%)l..[unknown] (52,891,684,033 samples, 1.49%)[..[unknown] (51,489,363,011 samples, 1.45%)[..[unknown] (51,020,482,662 samples, 1.43%)[..[unknown] (46,915,115,303 samples, 1.32%)[unknown] (45,255,852,290 samples, 1.27%)[unknown] (38,150,418,340 samples, 1.07%)[unknown] (35,292,486,865 samples, 0.99%)[unknown] (7,892,404,247 samples, 0.22%)[unknown] (3,327,749,547 samples, 0.09%)[unknown] (1,188,855,625 samples, 0.03%)[unknown] (566,758,595 samples, 0.02%)libc.so.6::_int_free_create_chunk (628,326,946 samples, 0.02%)libc.so.6::_int_free_merge_chunk (358,656,602 samples, 0.01%)libc.so.6::_int_malloc (74,559,659,927 samples, 2.10%)li..[unknown] (721,620,417 samples, 0.02%)[unknown] (610,988,583 samples, 0.02%)[unknown] (610,988,583 samples, 0.02%)[unknown] (610,988,583 
samples, 0.02%)[unknown] (559,250,914 samples, 0.02%)[unknown] (559,250,914 samples, 0.02%)libc.so.6::alloc_perturb (425,154,213 samples, 0.01%)libc.so.6::malloc (24,700,554,078 samples, 0.69%)libc.so.6::malloc_consolidate (735,996,757 samples, 0.02%)libc.so.6::unlink_chunk.isra.0 (6,120,352,373 samples, 0.17%)[unknown] (167,607,884,597 samples, 4.71%)[unknown]libstdc++.so.6.0.32::virtual thunk to std::__cxx11::basic_ostringstream<char, std::char_traits<char>, std::allocator<char> >::~basic_ostringstream (417,178,495 samples, 0.01%)[unknown] (417,178,495 samples, 0.01%)libc.so.6::_IO_default_xsputn (371,898,668 samples, 0.01%)libc.so.6::_IO_do_write@@GLIBC_2.2.5 (415,186,042 samples, 0.01%)libc.so.6::_IO_file_xsputn@@GLIBC_2.2.5 (52,841,892,362 samples, 1.49%)l..libc.so.6::_IO_fwrite (157,971,658,633 samples, 4.44%)libc.so...[[ext4]] (1,657,432,113 samples, 0.05%)[unknown] (573,069,492 samples, 0.02%)[[ext4]] (2,536,153,731 samples, 0.07%)[[ext4]] (10,537,322,599 samples, 0.30%)[unknown] (7,422,408,080 samples, 0.21%)[unknown] (6,329,696,449 samples, 0.18%)[unknown] (5,353,636,150 samples, 0.15%)[unknown] (5,041,980,997 samples, 0.14%)[unknown] (3,383,888,214 samples, 0.10%)[unknown] (1,348,486,405 samples, 0.04%)[unknown] (477,579,410 samples, 0.01%)[unknown] (424,961,857 samples, 0.01%)[[ext4]] (48,707,811,335 samples, 1.37%)[..[unknown] (37,296,429,178 samples, 1.05%)[unknown] (35,118,068,672 samples, 0.99%)[unknown] (29,610,843,695 samples, 0.83%)[unknown] (24,208,827,110 samples, 0.68%)[unknown] (17,096,181,771 samples, 0.48%)[unknown] (6,112,761,166 samples, 0.17%)[unknown] (1,344,893,459 samples, 0.04%)[unknown] (458,831,632 samples, 0.01%)[[ext4]] (365,017,200 samples, 0.01%)[[ext4]] (518,180,627 samples, 0.01%)[[ext4]] (466,259,788 samples, 0.01%)[[ext4]] (673,383,386 samples, 0.02%)[[ext4]] (59,764,846,104 samples, 1.68%)[..[unknown] (58,060,722,922 samples, 1.63%)[..[unknown] (7,950,480,723 samples, 0.22%)[unknown] (5,540,377,500 samples, 0.16%)[unknown] 
(865,590,582 samples, 0.02%)[unknown] (813,212,612 samples, 0.02%)[unknown] (813,212,612 samples, 0.02%)[unknown] (813,212,612 samples, 0.02%)[unknown] (711,368,524 samples, 0.02%)libc.so.6::__GI___libc_write (70,786,161,691 samples, 1.99%)li..[unknown] (70,568,950,557 samples, 1.98%)[u..[unknown] (69,379,113,892 samples, 1.95%)[u..[unknown] (68,772,280,665 samples, 1.93%)[u..[unknown] (66,697,097,059 samples, 1.88%)[u..[unknown] (3,800,961,354 samples, 0.11%)[unknown] (780,895,718 samples, 0.02%)libc.so.6::__memmove_avx512_unaligned_erms (15,769,232,267 samples, 0.44%)libc.so.6::__mempcpy@plt (4,938,637,189 samples, 0.14%)libc.so.6::__send (1,149,037,952 samples, 0.03%)[unknown] (1,149,037,952 samples, 0.03%)[unknown] (1,149,037,952 samples, 0.03%)[unknown] (1,149,037,952 samples, 0.03%)[unknown] (1,096,533,096 samples, 0.03%)[unknown] (1,096,533,096 samples, 0.03%)[unknown] (1,096,533,096 samples, 0.03%)[unknown] (1,094,640,456 samples, 0.03%)[unknown] (943,771,904 samples, 0.03%)[unknown] (626,496,659 samples, 0.02%)[unknown] (522,399,654 samples, 0.01%)[unknown] (469,549,544 samples, 0.01%)[unknown] (469,549,544 samples, 0.01%)[unknown] (366,321,373 samples, 0.01%)libc.so.6::_int_free (16,918,597,179 samples, 0.48%)libc.so.6::_int_free_merge_chunk (716,678,677 samples, 0.02%)libc.so.6::_int_malloc (1,269,524,481 samples, 0.04%)libc.so.6::cfree@GLIBC_2.2.5 (4,352,992,616 samples, 0.12%)libc.so.6::malloc (8,032,159,513 samples, 0.23%)libc.so.6::malloc_consolidate (39,479,511,598 samples, 1.11%)[unknown] (401,333,554 samples, 0.01%)[unknown] (401,333,554 samples, 0.01%)[unknown] (401,333,554 samples, 0.01%)[unknown] (401,333,554 samples, 0.01%)[unknown] (401,333,554 samples, 0.01%)[unknown] (401,333,554 samples, 0.01%)libc.so.6::new_do_write (469,906,341 samples, 0.01%)libc.so.6::read (459,442,054 samples, 0.01%)[unknown] (459,442,054 samples, 0.01%)[unknown] (360,200,514 samples, 0.01%)[unknown] (360,200,514 samples, 0.01%)[unknown] (360,200,514 samples, 
0.01%)[unknown] (360,200,514 samples, 0.01%)libc.so.6::sysmalloc (469,717,952 samples, 0.01%)[unknown] (469,717,952 samples, 0.01%)[unknown] (415,893,983 samples, 0.01%)[unknown] (366,135,265 samples, 0.01%)[unknown] (366,135,265 samples, 0.01%)libc.so.6::unlink_chunk.isra.0 (2,862,604,776 samples, 0.08%)bitcoind::CBlockIndex::GetAncestor (412,360,660 samples, 0.01%)bitcoind::CCoinsViewCache::AccessCoin (421,783,849 samples, 0.01%)bitcoind::SipHashUint256Extra (6,150,872,313 samples, 0.17%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_erase (100,736,697,557 samples, 2.83%)bitc..bitcoind::SipHashUint256Extra (1,991,693,392 samples, 0.06%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (90,084,545,787 samples, 2.53%)bit..bitcoind::SipHashUint256Extra (71,251,854,599 samples, 2.00%)bi..bitcoind::SipHashUint256Extra (26,794,756,611 samples, 0.75%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_insert_unique_node 
(46,369,997,648 samples, 1.30%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_rehash (18,471,505,609 samples, 0.52%)libc.so.6::__memset_avx512_unaligned_erms (632,105,655 samples, 0.02%)[unknown] (579,371,219 samples, 0.02%)[unknown] (474,387,191 samples, 0.01%)[unknown] (421,585,797 samples, 0.01%)[unknown] (421,585,797 samples, 0.01%)[unknown] (368,759,434 samples, 0.01%)[unknown] (368,759,434 samples, 0.01%)[unknown] (368,759,434 samples, 0.01%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::erase (1,518,987,687 samples, 0.04%)bitcoind::SipHashUint256Extra (625,645,482 samples, 0.02%)bitcoind::SipHashUint256Extra (6,692,957,315 samples, 0.19%)[unknown] (1,036,177,296 samples, 0.03%)[unknown] (928,879,608 samples, 0.03%)[unknown] (877,183,919 samples, 0.02%)[unknown] (719,026,447 samples, 0.02%)[unknown] (666,701,067 samples, 0.02%)[unknown] (626,005,752 samples, 0.02%)[unknown] (364,282,815 samples, 0.01%)[unknown] (364,282,815 samples, 0.01%)[unknown] (364,282,815 samples, 0.01%)[unknown] (364,282,815 samples, 0.01%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, 
std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::find (133,163,328,034 samples, 3.75%)bitcoi..bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (119,438,100,972 samples, 3.36%)bitco..bitcoind::SipHashUint256Extra (986,497,657 samples, 0.03%)bitcoind::std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>* std::__detail::_Hashtable_alloc<PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 144ul, 8ul> >::_M_allocate_node<std::piecewise_construct_t const&, std::tuple<COutPoint const&>, std::tuple<> > (5,414,052,109 samples, 0.15%)libc.so.6::cfree@GLIBC_2.2.5 (4,527,272,747 samples, 0.13%)bitcoind::CCoinsViewCache::BatchWrite (408,297,908,928 samples, 11.48%)bitcoind::CCoinsViewCac..bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::clear (4,431,167,402 samples, 0.12%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, 
std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::find (676,133,697 samples, 0.02%)bitcoind::CCoinsViewCache::Flush (414,604,793,420 samples, 11.66%)bitcoind::CCoinsViewCach..bitcoind::CTxMemPool::removeConflicts (1,307,189,422 samples, 0.04%)bitcoind::std::_Rb_tree<COutPoint const*, std::pair<COutPoint const* const, CTransaction const*>, std::_Select1st<std::pair<COutPoint const* const, CTransaction const*> >, DereferencingComparator<COutPoint const*>, std::allocator<std::pair<COutPoint const* const, CTransaction const*> > >::find (940,298,479 samples, 0.03%)bitcoind::SipHashUint256 (1,301,282,993 samples, 0.04%)bitcoind::std::_Rb_tree<uint256, std::pair<uint256 const, long>, std::_Select1st<std::pair<uint256 const, long> >, std::less<uint256>, std::allocator<std::pair<uint256 const, long> > >::_M_erase (1,201,625,005 samples, 0.03%)bitcoind::CTxMemPool::removeForBlock (17,028,655,239 samples, 0.48%)bitcoind::std::_Rb_tree<uint256, std::pair<uint256 const, long>, std::_Select1st<std::pair<uint256 const, long> >, std::less<uint256>, std::allocator<std::pair<uint256 const, long> > >::erase (12,855,923,134 samples, 0.36%)bitcoind::std::_Rb_tree<uint256, std::pair<uint256 const, long>, std::_Select1st<std::pair<uint256 const, long> >, std::less<uint256>, std::allocator<std::pair<uint256 const, long> > >::equal_range (2,508,971,022 samples, 0.07%)[unknown] (3,441,479,431 samples, 0.10%)[unknown] (3,089,709,936 samples, 0.09%)[unknown] (2,820,174,820 samples, 0.08%)[unknown] (2,720,356,939 samples, 0.08%)[unknown] (2,720,356,939 samples, 0.08%)[unknown] (2,557,087,196 samples, 0.07%)[unknown] (2,356,775,337 samples, 0.07%)[unknown] (1,672,816,080 samples, 0.05%)[unknown] (1,100,674,926 samples, 0.03%)[unknown] (787,217,059 samples, 0.02%)[unknown] (574,492,426 samples, 0.02%)bitcoind::SipHashUint256Extra (359,543,734 samples, 0.01%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, 
PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (10,977,516,042 samples, 0.31%)bitcoind::SipHashUint256Extra (3,562,058,963 samples, 0.10%)bitcoind::SipHashUint256Extra (1,836,963,585 samples, 0.05%)bitcoind::SipHashUint256Extra (6,867,820,925 samples, 0.19%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_insert_unique_node (16,890,522,357 samples, 0.48%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_rehash (12,768,158,119 samples, 0.36%)bitcoind::std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>* std::__detail::_Hashtable_alloc<PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 144ul, 8ul> >::_M_allocate_node<std::piecewise_construct_t const&, std::tuple<COutPoint const&>, std::tuple<> > (6,083,575,685 samples, 0.17%)[unknown] (2,667,289,880 samples, 0.08%)[unknown] (2,453,773,220 samples, 0.07%)[unknown] (2,293,236,868 samples, 0.06%)[unknown] (2,189,852,142 samples, 0.06%)[unknown] (1,978,814,058 samples, 0.06%)[unknown] 
(1,713,021,112 samples, 0.05%)[unknown] (1,360,558,892 samples, 0.04%)[unknown] (1,099,770,850 samples, 0.03%)[unknown] (785,095,967 samples, 0.02%)[unknown] (468,560,942 samples, 0.01%)[unknown] (366,515,283 samples, 0.01%)bitcoind::CCoinsViewCache::AddCoin (67,517,205,631 samples, 1.90%)bi..bitcoind::AddCoins (83,151,504,659 samples, 2.34%)bit..bitcoind::std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>* std::__detail::_Hashtable_alloc<PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 144ul, 8ul> >::_M_allocate_node<std::piecewise_construct_t const&, std::tuple<COutPoint const&>, std::tuple<> > (368,308,911 samples, 0.01%)bitcoind::CBlockIndex::GetAncestor (780,828,411 samples, 0.02%)bitcoind::SipHashUint256Extra (6,967,127,022 samples, 0.20%)bitcoind::CCoinsViewCache::FetchCoin (11,631,656,359 samples, 0.33%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (2,762,447,333 samples, 0.08%)bitcoind::CCoinsViewCache::AccessCoin (13,718,933,582 samples, 0.39%)bitcoind::CCoinsViewCache::AddCoin (935,848,977 samples, 0.03%)bitcoind::CCoinsViewCache::HaveInputs (363,967,847 samples, 0.01%)bitcoind::CCoinsViewCache::SpendCoin (775,446,488 samples, 0.02%)bitcoind::CTransaction::GetValueOut (571,129,594 samples, 0.02%)bitcoind::SipHashUint256Extra (6,132,196,838 samples, 0.17%)bitcoind::CCoinsViewCache::FetchCoin (22,771,955,106 samples, 0.64%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, 
std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (8,387,260,917 samples, 0.24%)bitcoind::SipHashUint256Extra (672,360,582 samples, 0.02%)bitcoind::CCoinsViewCache::AccessCoin (27,541,380,041 samples, 0.77%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (840,128,595 samples, 0.02%)bitcoind::CCoinsViewCache::FetchCoin (9,862,576,991 samples, 0.28%)bitcoind::CCoinsViewCache::FetchCoin (723,258,358 samples, 0.02%)bitcoind::CCoinsViewBacked::GetCoin (1,001,559,892 samples, 0.03%)bitcoind::leveldb::LookupKey::LookupKey (468,932,422 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (477,889,771 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (2,464,437,114 samples, 0.07%)bitcoind::leveldb::FindFile (12,889,348,897 samples, 0.36%)bitcoind::leveldb::InternalKeyComparator::Compare (8,952,657,039 samples, 0.25%)libc.so.6::__memcmp_evex_movbe (3,658,168,717 samples, 0.10%)bitcoind::leveldb::InternalKeyComparator::Compare (468,603,758 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::ShardedLRUCache::Lookup (2,481,353,143 samples, 0.07%)[unknown] (470,703,247 samples, 0.01%)[unknown] (419,110,322 samples, 0.01%)[unknown] (367,081,554 samples, 0.01%)[unknown] (367,081,554 samples, 0.01%)bitcoind::std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::_M_mutate (723,558,367 samples, 
0.02%)libc.so.6::__memmove_avx512_unaligned_erms (682,634,544 samples, 0.02%)bitcoind::leveldb::Block::Iter::ParseNextKey (6,607,693,428 samples, 0.19%)libc.so.6::malloc (468,621,157 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (3,736,764,421 samples, 0.11%)bitcoind::leveldb::InternalKeyComparator::Compare (22,752,758,306 samples, 0.64%)libc.so.6::__memcmp_evex_movbe (16,502,022,326 samples, 0.46%)bitcoind::leveldb::Block::Iter::Seek (81,753,854,146 samples, 2.30%)bit..libc.so.6::__memmove_avx512_unaligned_erms (624,754,079 samples, 0.02%)bitcoind::leveldb::Block::Iter::~Iter (1,202,042,453 samples, 0.03%)bitcoind::leveldb::Iterator::~Iterator (886,809,043 samples, 0.02%)bitcoind::leveldb::DeleteBlock (418,661,180 samples, 0.01%)bitcoind::leveldb::Block::NewIterator (1,830,741,267 samples, 0.05%)bitcoind::leveldb::BlockHandle::DecodeFrom (1,350,133,609 samples, 0.04%)bitcoind::leveldb::FilterBlockReader::KeyMayMatch (3,241,956,535 samples, 0.09%)bitcoind::leveldb::InternalFilterPolicy::KeyMayMatch (470,469,134 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BloomFilterPolicy::KeyMayMatch (470,469,134 samples, 0.01%)bitcoind::leveldb::InternalKeyComparator::Compare (2,930,394,374 samples, 0.08%)bitcoind::leveldb::SaveValue (885,107,264 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::ShardedLRUCache::Lookup (1,152,034,360 samples, 0.03%)bitcoind::leveldb::Hash (363,890,191 samples, 0.01%)bitcoind::leveldb::Block::NewIterator (1,259,229,813 samples, 0.04%)bitcoind::leveldb::BlockHandle::DecodeFrom (1,156,612,863 samples, 0.03%)bitcoind::leveldb::GetVarint64 (416,693,035 samples, 0.01%)bitcoind::leveldb::Iterator::RegisterCleanup (363,166,691 samples, 0.01%)[unknown] (2,314,123,053 samples, 0.07%)[unknown] (2,156,687,800 samples, 0.06%)[unknown] (2,051,108,413 samples, 0.06%)[unknown] (1,945,393,833 samples, 0.05%)[unknown] (1,894,650,811 samples, 0.05%)[unknown] (1,894,650,811 samples, 0.05%)[unknown] 
(1,794,842,453 samples, 0.05%)[unknown] (1,315,291,384 samples, 0.04%)[unknown] (733,842,157 samples, 0.02%)[unknown] (421,059,647 samples, 0.01%)[unknown] (367,252,654 samples, 0.01%)bitcoind::crc32c::ExtendSse42 (56,521,776,403 samples, 1.59%)b..bitcoind::leveldb::ReadBlock (62,722,682,079 samples, 1.76%)b..libc.so.6::__GI___pthread_mutex_unlock_usercnt (978,769,336 samples, 0.03%)libc.so.6::cfree@GLIBC_2.2.5 (571,745,263 samples, 0.02%)bitcoind::leveldb::Table::BlockReader (93,027,689,265 samples, 2.62%)bit..libc.so.6::__memmove_avx512_unaligned_erms (525,280,305 samples, 0.01%)bitcoind::leveldb::Table::InternalGet (191,009,481,478 samples, 5.37%)bitcoind::..bitcoind::leveldb::(anonymous namespace)::ShardedLRUCache::Lookup (2,456,558,609 samples, 0.07%)bitcoind::leveldb::Hash (674,476,478 samples, 0.02%)libc.so.6::__GI___pthread_mutex_unlock_usercnt (949,827,762 samples, 0.03%)libc.so.6::__memcmp_evex_movbe (672,469,665 samples, 0.02%)libc.so.6::pthread_mutex_lock@@GLIBC_2.2.5 (770,697,666 samples, 0.02%)bitcoind::leveldb::TableCache::FindTable (5,889,647,371 samples, 0.17%)bitcoind::leveldb::TableCache::Get (199,229,141,358 samples, 5.60%)bitcoind::..bitcoind::leveldb::Version::Get (200,226,855,069 samples, 5.63%)bitcoind::..libc.so.6::__GI___pthread_mutex_unlock_usercnt (733,288,816 samples, 0.02%)bitcoind::leveldb::Version::ForEachOverlapping (215,208,197,899 samples, 6.05%)bitcoind::l..libc.so.6::__memcmp_evex_movbe (359,285,284 samples, 0.01%)bitcoind::leveldb::Version::Get (216,049,507,027 samples, 6.08%)bitcoind::l..bitcoind::leveldb::DBImpl::Get (217,672,929,621 samples, 6.12%)bitcoind::l..libc.so.6::__GI___pthread_mutex_unlock_usercnt (1,861,877,233 samples, 0.05%)bitcoind::CDBWrapper::ReadImpl[abi:cxx11] (221,752,252,623 samples, 6.24%)bitcoind::CD..libc.so.6::pthread_mutex_lock@@GLIBC_2.2.5 (1,748,433,964 samples, 0.05%)bitcoind::DecompressAmount (1,005,313,570 samples, 0.03%)bitcoind::void ScriptCompression::Unser<DataStream> (2,769,444,330 samples, 
0.08%)bitcoind::void std::vector<std::byte, zero_after_free_allocator<std::byte> >::_M_range_insert<std::byte const*> (7,911,029,894 samples, 0.22%)libc.so.6::__memmove_avx512_unaligned_erms (416,410,569 samples, 0.01%)bitcoind::CCoinsViewDB::GetCoin (247,131,705,346 samples, 6.95%)bitcoind::CCo..bitcoind::CCoinsViewBacked::GetCoin (251,714,610,750 samples, 7.08%)bitcoind::CCo..bitcoind::CCoinsViewErrorCatcher::GetCoin (257,960,090,912 samples, 7.26%)bitcoind::CCoi..bitcoind::CCoinsViewDB::GetCoin (5,789,812,101 samples, 0.16%)bitcoind::SipHashUint256Extra (686,778,601 samples, 0.02%)[unknown] (1,028,820,936 samples, 0.03%)[unknown] (974,950,139 samples, 0.03%)[unknown] (867,196,862 samples, 0.02%)[unknown] (710,030,298 samples, 0.02%)[unknown] (710,030,298 samples, 0.02%)[unknown] (600,430,034 samples, 0.02%)[unknown] (489,234,171 samples, 0.01%)[unknown] (434,975,120 samples, 0.01%)[unknown] (434,975,120 samples, 0.01%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (29,304,700,539 samples, 0.82%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_insert_unique_node (21,307,639,964 samples, 0.60%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, 
std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_rehash (20,780,827,998 samples, 0.58%)libc.so.6::__memset_avx512_unaligned_erms (579,451,231 samples, 0.02%)[unknown] (579,451,231 samples, 0.02%)[unknown] (526,649,228 samples, 0.01%)[unknown] (526,649,228 samples, 0.01%)[unknown] (526,649,228 samples, 0.01%)[unknown] (473,772,435 samples, 0.01%)[unknown] (420,996,348 samples, 0.01%)[unknown] (368,735,591 samples, 0.01%)[unknown] (368,735,591 samples, 0.01%)bitcoind::std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>* std::__detail::_Hashtable_alloc<PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 144ul, 8ul> >::_M_allocate_node<std::piecewise_construct_t const&, std::tuple<COutPoint const&>, std::tuple<> > (4,934,629,385 samples, 0.14%)[unknown] (421,130,280 samples, 0.01%)[unknown] (368,737,467 samples, 0.01%)[unknown] (368,737,467 samples, 0.01%)bitcoind::CCoinsViewCache::FetchCoin (327,425,895,563 samples, 9.21%)bitcoind::CCoinsVi..bitcoind::CCoinsViewErrorCatcher::GetCoin (601,145,923 samples, 0.02%)bitcoind::CCoinsViewCache::GetCoin (349,247,006,292 samples, 9.82%)bitcoind::CCoinsView..bitcoind::SipHashUint256Extra (17,454,209,723 samples, 0.49%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (22,697,810,020 samples, 0.64%)bitcoind::SipHashUint256Extra (4,124,049,750 samples, 0.12%)bitcoind::SipHashUint256Extra (4,306,133,540 samples, 
0.12%)bitcoind::SipHashUint256Extra (7,085,914,542 samples, 0.20%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_insert_unique_node (19,180,887,889 samples, 0.54%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_rehash (12,199,005,039 samples, 0.34%)libc.so.6::__memset_avx512_unaligned_erms (574,777,734 samples, 0.02%)bitcoind::std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>* std::__detail::_Hashtable_alloc<PoolAllocator<std::__detail::_Hash_node<std::pair<COutPoint const, CCoinsCacheEntry>, false>, 144ul, 8ul> >::_M_allocate_node<std::piecewise_construct_t const&, std::tuple<COutPoint const&>, std::tuple<> > (7,865,255,678 samples, 0.22%)[unknown] (1,969,736,150 samples, 0.06%)[unknown] (1,916,111,977 samples, 0.05%)[unknown] (1,812,200,695 samples, 0.05%)[unknown] (1,812,200,695 samples, 0.05%)[unknown] (1,812,200,695 samples, 0.05%)[unknown] (1,496,076,465 samples, 0.04%)[unknown] (1,234,917,855 samples, 0.03%)[unknown] (921,179,131 samples, 0.03%)[unknown] (658,036,512 samples, 0.02%)[unknown] (507,636,670 samples, 0.01%)bitcoind::CCoinsViewCache::FetchCoin (439,862,693,437 samples, 12.37%)bitcoind::CCoinsViewCache..bitcoind::CCoinsViewCache::GetCoin (567,408,453 samples, 0.02%)bitcoind::SipHashUint256Extra (11,079,411,759 samples, 
0.31%)bitcoind::CCoinsViewCache::HaveInputs (468,021,622,384 samples, 13.16%)bitcoind::CCoinsViewCache::..bitcoind::Consensus::CheckTxInputs (525,550,058,887 samples, 14.78%)bitcoind::Consensus::CheckTxInp..bitcoind::CTransaction::GetValueOut (8,116,827,965 samples, 0.23%)bitcoind::EvaluateSequenceLocks (13,084,419,728 samples, 0.37%)bitcoind::CBlockIndex::GetMedianTimePast (12,762,378,539 samples, 0.36%)bitcoind::void std::__introsort_loop<long*, long, __gnu_cxx::__ops::_Iter_less_iter> (1,776,177,595 samples, 0.05%)bitcoind::SipHashUint256Extra (3,528,590,848 samples, 0.10%)bitcoind::CCoinsViewCache::FetchCoin (9,099,104,563 samples, 0.26%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (3,448,133,251 samples, 0.10%)bitcoind::SipHashUint256Extra (373,550,141 samples, 0.01%)bitcoind::CCoinsViewCache::AccessCoin (10,147,664,939 samples, 0.29%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (368,697,772 samples, 0.01%)bitcoind::CScript::GetSigOpCount (1,181,105,155 samples, 0.03%)bitcoind::CScript::IsPayToScriptHash (361,942,649 samples, 0.01%)bitcoind::CScript::IsPushOnly (1,550,137,517 samples, 0.04%)bitcoind::CScript::IsWitnessProgram (14,154,912,421 samples, 0.40%)bitcoind::GetScriptOp (1,727,592,712 samples, 
0.05%)bitcoind::CScript::GetSigOpCount (1,617,517,251 samples, 0.05%)bitcoind::GetScriptOp (834,793,526 samples, 0.02%)bitcoind::WitnessSigOps (3,120,635,596 samples, 0.09%)bitcoind::CountWitnessSigOps (25,211,941,345 samples, 0.71%)bitcoind::CScript::GetSigOpCount (21,895,087,837 samples, 0.62%)bitcoind::GetScriptOp (11,871,223,047 samples, 0.33%)bitcoind::GetLegacySigOpCount (26,548,006,408 samples, 0.75%)bitcoind::GetScriptOp (1,822,747,918 samples, 0.05%)bitcoind::SipHashUint256Extra (1,613,835,917 samples, 0.05%)bitcoind::CCoinsViewCache::FetchCoin (6,631,397,326 samples, 0.19%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_find_before_node (2,817,298,340 samples, 0.08%)bitcoind::CCoinsViewCache::AccessCoin (7,316,792,317 samples, 0.21%)bitcoind::CCoinsViewCache::FetchCoin (363,943,746 samples, 0.01%)bitcoind::CScript::GetSigOpCount (1,160,904,417 samples, 0.03%)bitcoind::GetScriptOp (688,273,084 samples, 0.02%)bitcoind::GetScriptOp (2,964,048,193 samples, 0.08%)bitcoind::CScript::GetSigOpCount (5,643,658,755 samples, 0.16%)bitcoind::CScript::IsPayToScriptHash (581,631,871 samples, 0.02%)bitcoind::GetP2SHSigOpCount (15,633,133,461 samples, 0.44%)bitcoind::GetTransactionSigOpCost (84,183,784,739 samples, 2.37%)bit..libstdc++.so.6.0.32::operator delete (405,410,027 samples, 0.01%)bitcoind::SequenceLocks (1,661,951,664 samples, 0.05%)bitcoind::CalculateSequenceLocks (1,453,270,225 samples, 0.04%)bitcoind::SipHashUint256Extra (937,441,713 samples, 0.03%)bitcoind::CCoinsViewCache::FetchCoin (2,049,216,208 samples, 0.06%)bitcoind::SipHashUint256Extra (1,345,870,966 samples, 0.04%)bitcoind::std::_Hashtable<COutPoint, 
std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::_M_erase (363,086,362 samples, 0.01%)bitcoind::CCoinsViewCache::SpendCoin (20,676,663,595 samples, 0.58%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::erase (2,777,265,349 samples, 0.08%)bitcoind::SipHashUint256Extra (1,428,091,877 samples, 0.04%)bitcoind::UpdateCoins (24,385,621,354 samples, 0.69%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::erase (473,710,256 samples, 0.01%)bitcoind::AutoFile::write (1,453,030,200 samples, 0.04%)bitcoind::CSHA256::Write (1,519,729,645 samples, 0.04%)bitcoind::CompressAmount (618,711,609 samples, 0.02%)bitcoind::CompressScript (985,913,050 samples, 0.03%)[[ext4]] (404,866,263 samples, 0.01%)bitcoind::node::BlockManager::FindUndoPos (561,722,604 samples, 0.02%)bitcoind::FlatFileSeq::Allocate (509,374,850 samples, 0.01%)libc.so.6::posix_fallocate (509,374,850 samples, 0.01%)[unknown] (509,374,850 samples, 0.01%)[unknown] (509,374,850 samples, 0.01%)[unknown] (509,374,850 samples, 
0.01%)[unknown] (457,299,763 samples, 0.01%)bitcoind::AutoFile::write (10,042,610,399 samples, 0.28%)bitcoind::CSHA256::Write (19,844,383,315 samples, 0.56%)bitcoind::sha256_x86_shani::Transform (3,151,148,807 samples, 0.09%)bitcoind::CompressAmount (1,773,668,392 samples, 0.05%)bitcoind::CompressScript (4,638,408,540 samples, 0.13%)bitcoind::prevector<33u, unsigned char, unsigned int, int>::resize (3,040,914,869 samples, 0.09%)bitcoind::CompressAmount (831,251,028 samples, 0.02%)bitcoind::prevector<33u, unsigned char, unsigned int, int>::resize (1,914,945,145 samples, 0.05%)bitcoind::void VectorFormatter<DefaultFormatter>::Ser<SizeComputer, std::vector<CTxUndo, std::allocator<CTxUndo> > > (7,020,871,233 samples, 0.20%)bitcoind::CompressScript (2,957,454,406 samples, 0.08%)bitcoind::AutoFile::write (4,887,544,250 samples, 0.14%)bitcoind::void WriteVarInt<AutoFile, (VarIntMode)0, unsigned int> (5,868,765,238 samples, 0.17%)bitcoind::CSHA256::Write (8,012,816,481 samples, 0.23%)bitcoind::sha256_x86_shani::Transform (938,301,513 samples, 0.03%)bitcoind::void WriteVarInt<HashWriter, (VarIntMode)0, unsigned int> (12,386,753,309 samples, 0.35%)libc.so.6::__memmove_avx512_unaligned_erms (941,007,723 samples, 0.03%)libc.so.6::_IO_fwrite (1,409,554,078 samples, 0.04%)bitcoind::node::BlockManager::UndoWriteToDisk (74,178,487,109 samples, 2.09%)bi..libc.so.6::__memmove_avx512_unaligned_erms (3,806,477,393 samples, 0.11%)bitcoind::CompressAmount (730,340,863 samples, 0.02%)bitcoind::void VectorFormatter<DefaultFormatter>::Ser<SizeComputer, std::vector<CTxUndo, std::allocator<CTxUndo> > > (9,108,229,147 samples, 0.26%)bitcoind::CompressScript (3,027,453,269 samples, 0.09%)bitcoind::prevector<33u, unsigned char, unsigned int, int>::resize (2,034,465,890 samples, 0.06%)bitcoind::void WriteVarInt<AutoFile, (VarIntMode)0, unsigned int> (367,022,852 samples, 0.01%)bitcoind::void WriteVarInt<HashWriter, (VarIntMode)0, unsigned int> (521,478,522 samples, 
0.01%)bitcoind::node::BlockManager::WriteUndoDataForBlock (89,569,504,650 samples, 2.52%)bit..bitcoind::std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::_M_dispose (583,464,405 samples, 0.02%)libc.so.6::malloc (1,716,514,762 samples, 0.05%)bitcoind::Chainstate::ConnectBlock (855,466,273,851 samples, 24.06%)bitcoind::Chainstate::ConnectBlockbitcoind::std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::_M_dispose (418,091,278 samples, 0.01%)libc.so.6::cfree@GLIBC_2.2.5 (1,144,767,280 samples, 0.03%)bitcoind::Chainstate::ConnectTip (1,291,793,481,748 samples, 36.33%)bitcoind::Chainstate::ConnectTiplibstdc++.so.6.0.32::operator delete (627,918,999 samples, 0.02%)bitcoind::Chainstate::ActivateBestChainStep (1,291,995,942,063 samples, 36.34%)bitcoind::Chainstate::ActivateBestChainStepbitcoind::Chainstate::ActivateBestChain (1,292,515,820,515 samples, 36.35%)bitcoind::Chainstate::ActivateBestChainbitcoind::IsFinalTx (467,637,167 samples, 0.01%)bitcoind::void SerializeTransaction<ParamsStream<SizeComputer&, TransactionSerParams>, CTransaction> (25,890,452,766 samples, 0.73%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (6,846,762,723 samples, 0.19%)bitcoind::ContextualCheckBlock (27,706,291,261 samples, 0.78%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (1,089,597,648 samples, 0.03%)[[ext4]] (5,576,796,020 samples, 0.16%)[unknown] (3,815,201,507 samples, 0.11%)[unknown] (2,345,433,446 samples, 0.07%)[unknown] (520,783,293 samples, 0.01%)[[ext4]] (7,954,320,588 samples, 0.22%)[unknown] (1,662,013,865 samples, 0.05%)[unknown] (1,269,240,468 samples, 0.04%)[unknown] (705,348,263 samples, 0.02%)[unknown] (455,918,938 samples, 0.01%)[[nvme]] (807,919,787 samples, 0.02%)[[nvme]] (807,919,787 samples, 0.02%)[unknown] (807,919,787 samples, 0.02%)[unknown] (807,919,787 samples, 0.02%)[unknown] (807,919,787 samples, 0.02%)[unknown] (547,049,759 
samples, 0.02%)[unknown] (496,243,932 samples, 0.01%)[unknown] (448,114,949 samples, 0.01%)[[ext4]] (12,810,206,632 samples, 0.36%)[unknown] (3,316,731,307 samples, 0.09%)[unknown] (2,036,481,321 samples, 0.06%)[unknown] (1,478,602,939 samples, 0.04%)[unknown] (1,322,893,322 samples, 0.04%)[unknown] (1,227,580,922 samples, 0.03%)[[ext4]] (13,121,603,080 samples, 0.37%)[[ext4]] (13,121,603,080 samples, 0.37%)bitcoind::FlatFileSeq::Flush (13,525,287,477 samples, 0.38%)libc.so.6::fdatasync (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[[ext4]] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (13,525,287,477 samples, 0.38%)[unknown] (403,684,397 samples, 0.01%)[unknown] (403,684,397 samples, 0.01%)[unknown] (403,684,397 samples, 0.01%)[[ext4]] (619,895,319 samples, 0.02%)[unknown] (483,644,425 samples, 0.01%)[[ext4]] (981,510,072 samples, 0.03%)[[ext4]] (1,916,459,846 samples, 0.05%)[unknown] (398,097,615 samples, 0.01%)[[ext4]] (1,967,105,500 samples, 0.06%)[[ext4]] (1,967,105,500 samples, 0.06%)bitcoind::node::BlockManager::FindNextBlockPos (16,065,033,072 samples, 0.45%)bitcoind::node::BlockManager::FlushBlockFile (15,700,018,553 samples, 0.44%)bitcoind::node::BlockManager::FlushUndoFile (2,174,731,076 samples, 0.06%)bitcoind::FlatFileSeq::Flush (2,174,731,076 samples, 0.06%)libc.so.6::fdatasync (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[[ext4]] (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[unknown] (2,174,731,076 samples, 0.06%)[unknown] (2,119,891,081 samples, 0.06%)bitcoind::AutoFile::write (5,548,941,818 samples, 0.16%)libc.so.6::__GI___fstatat64 (365,833,677 samples, 
0.01%)bitcoind::node::BlockManager::OpenBlockFile (470,569,767 samples, 0.01%)bitcoind::AutoFile::write (61,167,375,809 samples, 1.72%)b..[unknown] (598,511,547 samples, 0.02%)[unknown] (457,806,853 samples, 0.01%)[unknown] (457,806,853 samples, 0.01%)[unknown] (457,806,853 samples, 0.01%)[unknown] (409,659,414 samples, 0.01%)[unknown] (357,939,661 samples, 0.01%)bitcoind::AutoFile::write (4,647,493,060 samples, 0.13%)bitcoind::void WriteCompactSize<ParamsStream<AutoFile&, TransactionSerParams> > (6,592,272,733 samples, 0.19%)libc.so.6::_IO_fwrite (910,505,012 samples, 0.03%)bitcoind::void SerializeMany<ParamsStream<AutoFile&, TransactionSerParams>, CBlockHeader, std::vector<std::shared_ptr<CTransaction const>, std::allocator<std::shared_ptr<CTransaction const> > > > (82,131,751,453 samples, 2.31%)bit..libc.so.6::_IO_fwrite (8,527,040,897 samples, 0.24%)bitcoind::void SerializeMany<ParamsStream<SizeComputer&, TransactionSerParams>, CBlockHeader, std::vector<std::shared_ptr<CTransaction const>, std::allocator<std::shared_ptr<CTransaction const> > > > (15,937,770,258 samples, 0.45%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (3,905,423,573 samples, 0.11%)bitcoind::void WriteCompactSize<ParamsStream<AutoFile&, TransactionSerParams> > (571,858,007 samples, 0.02%)bitcoind::node::BlockManager::WriteBlockToDisk (106,357,642,754 samples, 2.99%)bitc..bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (1,696,751,451 samples, 0.05%)bitcoind::void SerializeMany<ParamsStream<SizeComputer&, TransactionSerParams>, CBlockHeader, std::vector<std::shared_ptr<CTransaction const>, std::allocator<std::shared_ptr<CTransaction const> > > > (17,168,001,989 samples, 0.48%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (4,825,716,114 samples, 0.14%)bitcoind::node::BlockManager::SaveBlockToDisk (141,154,624,112 samples, 3.97%)bitcoi..bitcoind::void 
WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (1,511,914,109 samples, 0.04%)bitcoind::ChainstateManager::AcceptBlock (169,805,644,100 samples, 4.78%)bitcoind..bitcoind::void SerializeTransaction<ParamsStream<SizeComputer&, TransactionSerParams>, CTransaction> (419,732,705 samples, 0.01%)bitcoind::CScript::GetSigOpCount (1,244,733,942 samples, 0.04%)bitcoind::memcmp@plt (416,583,431 samples, 0.01%)bitcoind::std::_Rb_tree<COutPoint, COutPoint, std::_Identity<COutPoint>, std::less<COutPoint>, std::allocator<COutPoint> >::_M_erase (1,490,186,398 samples, 0.04%)bitcoind::std::pair<std::_Rb_tree_iterator<COutPoint>, bool> std::_Rb_tree<COutPoint, COutPoint, std::_Identity<COutPoint>, std::less<COutPoint>, std::allocator<COutPoint> >::_M_insert_unique<COutPoint const&> (4,247,810,353 samples, 0.12%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (781,801,877 samples, 0.02%)libc.so.6::__memcmp_evex_movbe (6,070,441,149 samples, 0.17%)libc.so.6::cfree@GLIBC_2.2.5 (421,482,290 samples, 0.01%)libstdc++.so.6.0.32::operator delete (614,232,991 samples, 0.02%)bitcoind::CheckTransaction (25,650,523,240 samples, 0.72%)libstdc++.so.6.0.32::std::_Rb_tree_insert_and_rebalance (2,281,327,330 samples, 0.06%)bitcoind::CScript::GetSigOpCount (19,161,186,078 samples, 0.54%)bitcoind::GetScriptOp (8,992,060,021 samples, 0.25%)bitcoind::GetLegacySigOpCount (22,614,517,690 samples, 0.64%)bitcoind::GetScriptOp (1,176,069,512 samples, 0.03%)bitcoind::std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >::_M_dispose (1,349,955,285 samples, 0.04%)bitcoind::void SerializeTransaction<ParamsStream<SizeComputer&, TransactionSerParams>, CTransaction> (6,676,130,736 samples, 0.19%)bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, TransactionSerParams> > (1,329,967,416 samples, 0.04%)bitcoind::CheckBlock (60,550,319,748 samples, 1.70%)b..bitcoind::void WriteCompactSize<ParamsStream<SizeComputer&, 
TransactionSerParams> > (572,803,846 samples, 0.02%)bitcoind::ChainstateManager::ProcessNewBlock (1,523,688,403,640 samples, 42.85%)bitcoind::ChainstateManager::ProcessNewBlockbitcoind::sha256d64_x86_shani::Transform_2way (15,194,464,935 samples, 0.43%)bitcoind::BlockMerkleRoot (16,172,687,252 samples, 0.45%)bitcoind::ComputeMerkleRoot (15,499,928,925 samples, 0.44%)bitcoind::SHA256D64 (15,246,405,066 samples, 0.43%)bitcoind::CheckMerkleRoot (16,532,547,442 samples, 0.46%)libc.so.6::__memset_avx512_unaligned_erms (359,860,190 samples, 0.01%)bitcoind::sha256d64_x86_shani::Transform_2way (12,972,294,835 samples, 0.36%)bitcoind::SHA256D64 (13,025,009,373 samples, 0.37%)bitcoind::IsBlockMutated (30,129,022,002 samples, 0.85%)bitcoind::CheckWitnessMalleation (13,596,474,560 samples, 0.38%)bitcoind::BlockWitnessMerkleRoot (13,596,474,560 samples, 0.38%)bitcoind::ComputeMerkleRoot (13,077,728,889 samples, 0.37%)bitcoind::void (anonymous namespace)::PeerManagerImpl::MakeAndPushMessage<std::vector<CInv, std::allocator<CInv> >&> (406,479,193 samples, 0.01%)bitcoind::CConnman::PushMessage (406,479,193 samples, 0.01%)bitcoind::std::vector<unsigned char, std::allocator<unsigned char> >::_M_default_append (367,056,757 samples, 0.01%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (622,762,372 samples, 0.02%)bitcoind::CTransaction::ComputeHasWitness (1,387,667,716 samples, 0.04%)bitcoind::CSHA256::Write (17,955,645,390 samples, 0.51%)bitcoind::sha256_x86_shani::Transform (11,932,913,194 samples, 0.34%)bitcoind::memcpy@plt (418,918,061 samples, 0.01%)bitcoind::sha256_x86_shani::Transform (3,306,980,273 samples, 0.09%)bitcoind::CSHA256::Finalize (22,917,960,073 samples, 0.64%)libc.so.6::__memmove_avx512_unaligned_erms (668,127,949 samples, 0.02%)bitcoind::CSHA256::Write (3,163,584,691 samples, 0.09%)bitcoind::CSHA256::Write (33,313,763,000 samples, 0.94%)bitcoind::sha256_x86_shani::Transform (14,194,928,537 samples, 
0.40%)bitcoind::sha256_x86_shani::Transform (767,994,599 samples, 0.02%)bitcoind::CSHA256::Write (5,341,265,376 samples, 0.15%)bitcoind::void WriteCompactSize<ParamsStream<HashWriter&, TransactionSerParams> > (7,984,745,468 samples, 0.22%)bitcoind::void SerializeTransaction<ParamsStream<HashWriter&, TransactionSerParams>, CTransaction> (50,933,406,220 samples, 1.43%)b..libc.so.6::__memmove_avx512_unaligned_erms (5,183,727,187 samples, 0.15%)bitcoind::void WriteCompactSize<ParamsStream<HashWriter&, TransactionSerParams> > (1,613,593,834 samples, 0.05%)bitcoind::CTransaction::ComputeHash (80,845,793,271 samples, 2.27%)bit..bitcoind::CSHA256::Write (23,348,148,278 samples, 0.66%)bitcoind::sha256_x86_shani::Transform (11,595,812,714 samples, 0.33%)bitcoind::CSHA256::Finalize (24,335,325,870 samples, 0.68%)bitcoind::CSHA256::Write (2,288,432,816 samples, 0.06%)bitcoind::CSHA256::Write (64,681,112,465 samples, 1.82%)bi..bitcoind::sha256_x86_shani::Transform (33,677,349,718 samples, 0.95%)bitcoind::sha256_x86_shani::Transform (622,627,277 samples, 0.02%)bitcoind::CSHA256::Write (11,395,509,513 samples, 0.32%)bitcoind::sha256_x86_shani::Transform (523,186,685 samples, 0.01%)bitcoind::void WriteCompactSize<ParamsStream<HashWriter&, TransactionSerParams> > (17,046,149,334 samples, 0.48%)libc.so.6::__memmove_avx512_unaligned_erms (2,169,704,353 samples, 0.06%)bitcoind::void SerializeTransaction<ParamsStream<HashWriter&, TransactionSerParams>, CTransaction> (92,366,151,212 samples, 2.60%)bit..libc.so.6::__memmove_avx512_unaligned_erms (5,813,350,330 samples, 0.16%)bitcoind::void WriteCompactSize<ParamsStream<HashWriter&, TransactionSerParams> > (2,181,533,875 samples, 0.06%)bitcoind::CTransaction::ComputeWitnessHash (122,098,239,092 samples, 3.43%)bitco..bitcoind::CTransaction::CTransaction (213,407,475,563 samples, 6.00%)bitcoind::C..bitcoind::CTransaction::ComputeHasWitness (420,121,661 samples, 0.01%)bitcoind::CTransaction::ComputeHash (409,129,353 samples, 
0.01%)bitcoind::DataStream::read (3,530,026,319 samples, 0.10%)bitcoind::operator new (628,502,415 samples, 0.02%)bitcoind::std::vector<unsigned char, std::allocator<unsigned char> >::_M_default_append (6,309,702,925 samples, 0.18%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (4,193,618,734 samples, 0.12%)bitcoind::void Unserialize<ParamsStream<DataStream&, TransactionSerParams>, 28u, unsigned char> (727,834,750 samples, 0.02%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (3,066,020,716 samples, 0.09%)bitcoind::void Unserialize<ParamsStream<DataStream&, TransactionSerParams>, 28u, unsigned char> (3,105,560,893 samples, 0.09%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (1,455,846,726 samples, 0.04%)libc.so.6::__memmove_avx512_unaligned_erms (465,834,593 samples, 0.01%)bitcoind::void VectorFormatter<DefaultFormatter>::Unser<ParamsStream<DataStream&, TransactionSerParams>, std::vector<CTxIn, std::allocator<CTxIn> > > (15,677,650,112 samples, 0.44%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (1,078,029,303 samples, 0.03%)bitcoind::void Unserialize<ParamsStream<DataStream&, TransactionSerParams>, 28u, unsigned char> (6,171,925,860 samples, 0.17%)bitcoind::unsigned long ReadCompactSize<ParamsStream<DataStream&, TransactionSerParams> > (982,486,879 samples, 0.03%)libc.so.6::__memmove_avx512_unaligned_erms (1,458,516,290 samples, 0.04%)bitcoind::void VectorFormatter<DefaultFormatter>::Unser<ParamsStream<DataStream&, TransactionSerParams>, std::vector<CTxOut, std::allocator<CTxOut> > > (13,963,877,725 samples, 0.39%)libc.so.6::__memmove_avx512_unaligned_erms (1,048,169,614 samples, 0.03%)libc.so.6::__memset_avx512_unaligned (1,046,482,105 samples, 0.03%)libc.so.6::__memset_avx512_unaligned_erms (1,963,080,141 samples, 0.06%)libc.so.6::malloc (3,025,102,825 samples, 
0.09%)libstdc++.so.6.0.32::malloc@plt (1,462,651,744 samples, 0.04%)bitcoind::void Unserialize<ParamsStream<DataStream&, TransactionSerParams>, CTransaction> (283,898,001,379 samples, 7.98%)bitcoind::void U..libstdc++.so.6.0.32::operator new (2,257,486,798 samples, 0.06%)bitcoind::void VectorFormatter<DefaultFormatter>::Unser<ParamsStream<DataStream&, TransactionSerParams>, std::vector<CTxIn, std::allocator<CTxIn> > > (1,090,260,916 samples, 0.03%)libc.so.6::__memmove_avx512_unaligned_erms (2,753,503,546 samples, 0.08%)libc.so.6::malloc (1,149,716,024 samples, 0.03%)bitcoind::void ParamsWrapper<TransactionSerParams, CBlock>::Unserialize<DataStream> (291,189,121,636 samples, 8.19%)bitcoind::void P..bitcoind::void VectorFormatter<DefaultFormatter>::Unser<ParamsStream<DataStream&, TransactionSerParams>, std::vector<std::shared_ptr<CTransaction const>, std::allocator<std::shared_ptr<CTransaction const> > > > (291,134,772,004 samples, 8.19%)bitcoind::void V..libstdc++.so.6.0.32::operator new (518,854,210 samples, 0.01%)libc.so.6::__memset_avx512_unaligned_erms (3,588,859,593 samples, 0.10%)bitcoind::CConnman::ThreadMessageHandler (1,852,055,734,561 samples, 52.09%)bitcoind::CConnman::ThreadMessageHandlerlibstdc++.so.6.0.32::execute_native_thread_routine (1,852,107,180,016 samples, 52.09%)libstdc++.so.6.0.32::execute_native_thread_routinebitcoind::std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (*)(std::basic_string_view<char, std::char_traits<char> >, std::function<void ()>), char const*, CConnman::Start(CScheduler&, CConnman::Options const&)::{lambda()#5}> > >::_M_run (1,852,107,180,016 samples, 52.09%)bitcoind::std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (*)(std::basic_string_view<char, std::char..bitcoind::util::TraceThread (1,852,107,180,016 samples, 52.09%)bitcoind::util::TraceThreadlibstdc++.so.6.0.32::std::__cxx11::basic_stringbuf<char, std::char_traits<char>, std::allocator<char> >::overflow (397,900,679 samples, 0.01%)b-msghand 
(2,401,934,372,954 samples, 67.55%)b-msghand[[igc]] (638,737,826 samples, 0.02%)[unknown] (492,740,386 samples, 0.01%)[unknown] (492,740,386 samples, 0.01%)[unknown] (492,740,386 samples, 0.01%)libc.so.6::__libc_recv (23,769,090,268 samples, 0.67%)[unknown] (23,681,676,959 samples, 0.67%)[unknown] (23,585,908,630 samples, 0.66%)[unknown] (23,544,049,599 samples, 0.66%)[unknown] (23,499,819,825 samples, 0.66%)[unknown] (23,453,162,931 samples, 0.66%)[unknown] (23,205,326,716 samples, 0.65%)[unknown] (23,046,242,743 samples, 0.65%)[unknown] (23,000,657,790 samples, 0.65%)[unknown] (22,592,454,604 samples, 0.64%)[unknown] (21,715,983,496 samples, 0.61%)[unknown] (20,537,782,242 samples, 0.58%)[unknown] (19,311,079,312 samples, 0.54%)[unknown] (6,108,735,942 samples, 0.17%)[unknown] (1,360,583,546 samples, 0.04%)bitcoind::std::vector<std::byte, zero_after_free_allocator<std::byte> >::_M_fill_insert (16,619,401,507 samples, 0.47%)bitcoind::V2Transport::GetReceivedMessage (16,718,730,797 samples, 0.47%)[[igc]] (507,437,414 samples, 0.01%)[unknown] (412,781,498 samples, 0.01%)bitcoind::ChaCha20::Crypt (134,944,431,601 samples, 3.80%)bitcoi..bitcoind::ChaCha20Aligned::Crypt (134,944,431,601 samples, 3.80%)bitcoi..[unknown] (955,536,462 samples, 0.03%)[unknown] (955,536,462 samples, 0.03%)[unknown] (906,440,192 samples, 0.03%)[unknown] (861,463,927 samples, 0.02%)[unknown] (760,654,093 samples, 0.02%)[unknown] (658,510,836 samples, 0.02%)bitcoind::BIP324Cipher::Decrypt (196,638,059,936 samples, 5.53%)bitcoind::..bitcoind::FSChaCha20Poly1305::Decrypt (196,638,059,936 samples, 5.53%)bitcoind::..bitcoind::AEADChaCha20Poly1305::Decrypt (196,638,059,936 samples, 5.53%)bitcoind::..bitcoind::poly1305_donna::poly1305_update (61,693,628,335 samples, 1.74%)b..bitcoind::poly1305_donna::poly1305_blocks (61,693,628,335 samples, 1.74%)b..[unknown] (655,063,915 samples, 0.02%)[unknown] (607,270,235 samples, 0.02%)[unknown] (525,964,847 samples, 0.01%)[unknown] (525,964,847 samples, 
0.01%)[unknown] (470,111,416 samples, 0.01%)[unknown] (470,107,658 samples, 0.01%)bitcoind::V2Transport::ProcessReceivedPacketBytes (198,460,164,481 samples, 5.58%)bitcoind::..libc.so.6::__memset_avx512_unaligned_erms (1,781,860,401 samples, 0.05%)bitcoind::V2Transport::ReceivedBytes (203,432,631,557 samples, 5.72%)bitcoind::..libc.so.6::__memmove_avx512_unaligned_erms (4,655,332,308 samples, 0.13%)libc.so.6::__memmove_avx512_unaligned_erms (10,715,799,436 samples, 0.30%)bitcoind::CNode::ReceiveMsgBytes (231,225,287,054 samples, 6.50%)bitcoind::CN..bitcoind::CConnman::SocketHandlerConnected (231,463,366,433 samples, 6.51%)bitcoind::CC..libc.so.6::__poll (3,830,838,327 samples, 0.11%)[unknown] (3,830,838,327 samples, 0.11%)[unknown] (3,782,920,191 samples, 0.11%)[unknown] (3,725,807,764 samples, 0.10%)[unknown] (3,522,157,004 samples, 0.10%)[unknown] (3,150,768,515 samples, 0.09%)[unknown] (2,627,277,437 samples, 0.07%)[unknown] (2,338,467,135 samples, 0.07%)[unknown] (2,037,878,870 samples, 0.06%)[unknown] (1,480,962,324 samples, 0.04%)[unknown] (688,242,613 samples, 0.02%)bitcoind::CConnman::SocketHandler (236,436,484,949 samples, 6.65%)bitcoind::CCo..b-net (260,905,688,952 samples, 7.34%)b-netlibstdc++.so.6.0.32::execute_native_thread_routine (236,875,778,634 samples, 6.66%)libstdc++.so...bitcoind::std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (*)(std::basic_string_view<char, std::char_traits<char> >, std::function<void ()>), char const*, CConnman::Start(CScheduler&, CConnman::Options const&)::{lambda()#1}> > >::_M_run (236,875,778,634 samples, 6.66%)bitcoind::std..bitcoind::util::TraceThread (236,875,778,634 samples, 6.66%)bitcoind::uti..bitcoind::CConnman::ThreadSocketHandler (236,875,778,634 samples, 6.66%)bitcoind::CCo..libc.so.6::_int_free_create_chunk (982,572,444 samples, 0.03%)libc.so.6::_int_free_merge_chunk (797,147,451 samples, 0.02%)[unknown] (2,170,942,655 samples, 0.06%)libc.so.6::__futex_abstimed_wait_common (459,293,920 samples, 
0.01%)[unknown] (459,293,920 samples, 0.01%)[unknown] (405,487,988 samples, 0.01%)[unknown] (405,482,438 samples, 0.01%)[unknown] (405,482,438 samples, 0.01%)[unknown] (356,784,451 samples, 0.01%)libc.so.6::__lll_lock_wait_private (57,276,007,979 samples, 1.61%)l..[unknown] (54,932,210,267 samples, 1.54%)[..[unknown] (52,306,124,993 samples, 1.47%)[..[unknown] (51,843,804,338 samples, 1.46%)[..[unknown] (49,115,074,635 samples, 1.38%)[..[unknown] (47,020,328,627 samples, 1.32%)[unknown] (41,124,744,672 samples, 1.16%)[unknown] (38,571,784,780 samples, 1.08%)[unknown] (36,085,617,902 samples, 1.01%)[unknown] (32,172,048,607 samples, 0.90%)[unknown] (24,296,172,973 samples, 0.68%)[unknown] (14,033,556,774 samples, 0.39%)[unknown] (7,508,395,799 samples, 0.21%)[unknown] (3,295,574,070 samples, 0.09%)[unknown] (1,590,496,727 samples, 0.04%)[unknown] (1,002,849,637 samples, 0.03%)[unknown] (414,545,859 samples, 0.01%)libc.so.6::__lll_lock_wake_private (11,041,124,764 samples, 0.31%)[unknown] (10,991,162,572 samples, 0.31%)[unknown] (9,603,504,474 samples, 0.27%)[unknown] (9,459,439,012 samples, 0.27%)[unknown] (7,207,430,735 samples, 0.20%)[unknown] (5,830,933,319 samples, 0.16%)[unknown] (1,889,493,619 samples, 0.05%)[unknown] (394,342,984 samples, 0.01%)libc.so.6::_int_free (67,830,842,133 samples, 1.91%)li..libc.so.6::_int_free_merge_chunk (832,998,780 samples, 0.02%)libc.so.6::cfree@GLIBC_2.2.5 (2,087,601,863 samples, 0.06%)libc.so.6::malloc_consolidate (3,954,686,383 samples, 0.11%)libc.so.6::unlink_chunk.isra.0 (497,585,449 samples, 0.01%)bitcoind::CRollingBloomFilter::insert (356,229,732 samples, 0.01%)[unknown] (444,029,098 samples, 0.01%)[unknown] (397,328,353 samples, 0.01%)[unknown] (397,328,353 samples, 0.01%)[unknown] (397,328,353 samples, 0.01%)[unknown] (397,328,353 samples, 0.01%)bitcoind::CRollingBloomFilter::insert (165,056,371,702 samples, 4.64%)bitcoind..bitcoind::MurmurHash3 (79,485,956,130 samples, 2.24%)bit..[unknown] (508,285,343 samples, 
0.01%)[unknown] (450,228,615 samples, 0.01%)[unknown] (404,433,625 samples, 0.01%)[unknown] (404,433,625 samples, 0.01%)[unknown] (404,433,625 samples, 0.01%)[unknown] (404,433,625 samples, 0.01%)bitcoind::MurmurHash3 (5,783,718,949 samples, 0.16%)bitcoind::TxOrphanage::EraseForBlock (4,219,830,042 samples, 0.12%)bitcoind::std::_Rb_tree<COutPoint, std::pair<COutPoint const, std::set<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> >, TxOrphanage::IteratorComparator, std::allocator<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> > > > >, std::_Select1st<std::pair<COutPoint const, std::set<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> >, TxOrphanage::IteratorComparator, std::allocator<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> > > > > >, std::less<COutPoint>, std::allocator<std::pair<COutPoint const, std::set<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> >, TxOrphanage::IteratorComparator, std::allocator<std::_Rb_tree_iterator<std::pair<transaction_identifier<true> const, TxOrphanage::OrphanTx> > > > > > >::find (834,275,777 samples, 0.02%)bitcoind::node::TxDownloadManagerImpl::BlockConnected (176,131,189,628 samples, 4.95%)bitcoind:..bitcoind::TxRequestTracker::ForgetTxHash (789,439,865 samples, 0.02%)bitcoind::std::_Function_handler<void (), ValidationSignals::BlockConnected(ChainstateRole, std::shared_ptr<CBlock const> const&, CBlockIndex const*)::{lambda()#2}>::_M_invoke (177,028,683,872 samples, 4.98%)bitcoind:..bitcoind::std::_Sp_counted_ptr_inplace<CTransaction const, std::allocator<void>, (__gnu_cxx::_Lock_policy)2>::_M_dispose (18,094,676,466 samples, 0.51%)libc.so.6::cfree@GLIBC_2.2.5 (20,756,908,966 samples, 0.58%)bitcoind::std::_Sp_counted_ptr_inplace<CBlock, std::allocator<void>, (__gnu_cxx::_Lock_policy)2>::_M_dispose 
(48,020,349,476 samples, 1.35%)b..libstdc++.so.6.0.32::operator delete (7,010,168,745 samples, 0.20%)bitcoind::std::_Sp_counted_ptr_inplace<CTransaction const, std::allocator<void>, (__gnu_cxx::_Lock_policy)2>::_M_dispose (595,474,492 samples, 0.02%)libc.so.6::cfree@GLIBC_2.2.5 (1,199,917,863 samples, 0.03%)bitcoind::std::_Function_handler<void (), ValidationSignals::BlockConnected(ChainstateRole, std::shared_ptr<CBlock const> const&, CBlockIndex const*)::{lambda()#2}>::_M_manager (50,738,017,178 samples, 1.43%)b..bitcoind::std::_Sp_counted_base<(__gnu_cxx::_Lock_policy)2>::_M_release (50,738,017,178 samples, 1.43%)b..libstdc++.so.6.0.32::operator delete (678,866,047 samples, 0.02%)bitcoind::CBlockPolicyEstimator::processBlock (2,721,087,031 samples, 0.08%)bitcoind::TxConfirmStats::UpdateMovingAverages (2,530,304,686 samples, 0.07%)bitcoind::std::_Function_handler<void (), ValidationSignals::MempoolTransactionsRemovedForBlock(std::vector<RemovedMempoolTransactionInfo, std::allocator<RemovedMempoolTransactionInfo> > const&, unsigned int)::{lambda()#2}>::_M_invoke (2,804,941,944 samples, 0.08%)bitcoind::SerialTaskRunner::ProcessQueue (230,828,220,555 samples, 6.49%)bitcoind::Se..bitcoind::CScheduler::serviceQueue (231,341,597,555 samples, 6.51%)bitcoind::CS..bitcoind::std::_Function_handler<void (), Repeat(CScheduler&, std::function<void ()>, std::chrono::duration<long, std::ratio<1l, 1000l> >)::{lambda()#1}>::_M_invoke (386,989,959 samples, 0.01%)bitcoind::Repeat (386,989,959 samples, 0.01%)bitcoind::CSHA512::Finalize (386,989,959 samples, 0.01%)b-scheduler (378,036,629,725 samples, 10.63%)b-schedulerlibstdc++.so.6.0.32::execute_native_thread_routine (231,550,611,141 samples, 6.51%)libstdc++.so..bitcoind::std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (*)(std::basic_string_view<char, std::char_traits<char> >, std::function<void ()>), char const*, AppInitMain(node::NodeContext&, interfaces::BlockAndHeaderTipInfo*)::{lambda()#1}> > >::_M_run 
(231,550,611,141 samples, 6.51%)bitcoind::st..bitcoind::util::TraceThread (231,550,611,141 samples, 6.51%)bitcoind::ut..[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,418,945,425 samples, 0.04%)[unknown] (1,368,549,335 samples, 0.04%)[unknown] (1,263,446,697 samples, 0.04%)[unknown] (1,105,228,005 samples, 0.03%)[unknown] (684,110,353 samples, 0.02%)[unknown] (1,463,102,999 samples, 0.04%)libc.so.6::_int_malloc (1,478,820,457 samples, 0.04%)[unknown] (1,323,329,878 samples, 0.04%)[unknown] (1,219,148,488 samples, 0.03%)[unknown] (1,167,736,581 samples, 0.03%)[unknown] (1,167,736,581 samples, 0.03%)[unknown] (1,115,451,061 samples, 0.03%)[unknown] (1,014,330,812 samples, 0.03%)[unknown] (911,337,057 samples, 0.03%)[unknown] (714,835,817 samples, 0.02%)[unknown] (456,457,319 samples, 0.01%)[unknown] (3,458,133,839 samples, 0.10%)bitcoind::CDBWrapper::~CDBWrapper (1,160,687,762 samples, 0.03%)bitcoind::leveldb::DBImpl::~DBImpl (1,160,687,762 samples, 0.03%)bitcoind::leveldb::DBImpl::~DBImpl (1,160,687,762 samples, 0.03%)bitcoind::leveldb::TableCache::~TableCache (1,160,687,762 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::ShardedLRUCache::~ShardedLRUCache (1,160,687,762 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::LRUCache::~LRUCache (1,160,687,762 samples, 0.03%)bitcoind::leveldb::DeleteEntry (1,160,687,762 samples, 0.03%)libc.so.6::__munmap (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] (1,160,687,762 samples, 0.03%)[unknown] 
(580,697,270 samples, 0.02%)bitcoind::leveldb::PutVarint32 (363,737,260 samples, 0.01%)bitcoind::leveldb::PutLengthPrefixedSlice (571,217,019 samples, 0.02%)bitcoind::leveldb::WriteBatch::Delete (2,702,574,018 samples, 0.08%)bitcoind::leveldb::WriteBatchInternal::SetCount (1,715,286,573 samples, 0.05%)bitcoind::leveldb::WriteBatchInternal::SetCount (1,453,616,163 samples, 0.04%)bitcoind::CDBBatch::EraseImpl (5,090,452,967 samples, 0.14%)bitcoind::leveldb::PutVarint32 (1,872,876,736 samples, 0.05%)bitcoind::leveldb::PutLengthPrefixedSlice (2,343,591,543 samples, 0.07%)bitcoind::leveldb::PutVarint32 (572,117,605 samples, 0.02%)bitcoind::leveldb::PutVarint32 (567,491,257 samples, 0.02%)bitcoind::leveldb::PutLengthPrefixedSlice (938,977,738 samples, 0.03%)bitcoind::leveldb::WriteBatchInternal::Count (619,405,896 samples, 0.02%)bitcoind::leveldb::WriteBatch::Put (2,689,024,451 samples, 0.08%)bitcoind::CDBBatch::WriteImpl (10,634,135,335 samples, 0.30%)bitcoind::leveldb::GetLengthPrefixedSlice (463,225,027 samples, 0.01%)bitcoind::leveldb::GetLengthPrefixedSlice (6,489,010,398 samples, 0.18%)bitcoind::leveldb::GetVarint32 (3,004,905,545 samples, 0.08%)bitcoind::leveldb::GetVarint32 (1,160,323,181 samples, 0.03%)bitcoind::leveldb::Arena::AllocateAligned (406,996,319 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (717,398,174 samples, 0.02%)bitcoind::leveldb::MemTable::KeyComparator::operator (5,108,835,410 samples, 0.14%)bitcoind::leveldb::InternalKeyComparator::Compare (3,324,232,989 samples, 0.09%)bitcoind::leveldb::InternalKeyComparator::Compare (4,244,823,969 samples, 0.12%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (3,179,677,931 samples, 0.09%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (33,796,395,298 samples, 0.95%)bitcoind::memcmp@plt (943,665,852 samples, 0.03%)bitcoind::leveldb::SkipList<char const*, leveldb::MemTable::KeyComparator>::Insert (185,524,871,422 
samples, 5.22%)bitcoind:..bitcoind::leveldb::SkipList<char const*, leveldb::MemTable::KeyComparator>::FindGreaterOrEqual (178,286,921,652 samples, 5.01%)bitcoind:..bitcoind::leveldb::MemTable::KeyComparator::operator (98,574,957,808 samples, 2.77%)bitc..bitcoind::leveldb::InternalKeyComparator::Compare (75,114,665,063 samples, 2.11%)bi..libc.so.6::__memcmp_evex_movbe (8,323,863,446 samples, 0.23%)bitcoind::leveldb::MemTable::Add (188,893,844,275 samples, 5.31%)bitcoind::..bitcoind::leveldb::VarintLength (766,638,876 samples, 0.02%)bitcoind::leveldb::WriteBatchInternal::InsertInto (199,306,778,687 samples, 5.61%)bitcoind::..bitcoind::leveldb::WriteBatch::Iterate (198,740,714,232 samples, 5.59%)bitcoind::..bitcoind::crc32c::ExtendSse42 (471,197,509 samples, 0.01%)[[ext4]] (679,093,773 samples, 0.02%)[unknown] (522,409,669 samples, 0.01%)[[ext4]] (1,096,838,426 samples, 0.03%)[[ext4]] (1,722,362,275 samples, 0.05%)[unknown] (625,523,849 samples, 0.02%)[unknown] (574,147,567 samples, 0.02%)[unknown] (469,028,477 samples, 0.01%)[unknown] (469,028,477 samples, 0.01%)[unknown] (365,648,781 samples, 0.01%)[[ext4]] (4,389,086,262 samples, 0.12%)[unknown] (2,561,710,219 samples, 0.07%)[unknown] (2,561,710,219 samples, 0.07%)[unknown] (2,352,117,097 samples, 0.07%)[unknown] (1,880,182,821 samples, 0.05%)[unknown] (1,308,734,829 samples, 0.04%)[unknown] (523,736,031 samples, 0.01%)[[ext4]] (5,069,490,473 samples, 0.14%)[unknown] (5,069,490,473 samples, 0.14%)[unknown] (575,311,800 samples, 0.02%)[unknown] (470,084,210 samples, 0.01%)libc.so.6::__GI___libc_write (5,174,401,795 samples, 0.15%)[unknown] (5,174,401,795 samples, 0.15%)[unknown] (5,174,401,795 samples, 0.15%)[unknown] (5,174,401,795 samples, 0.15%)[unknown] (5,174,401,795 samples, 0.15%)bitcoind::CDBWrapper::WriteBatch (205,215,727,495 samples, 5.77%)bitcoind::C..bitcoind::leveldb::DBImpl::Write (205,215,727,495 samples, 5.77%)bitcoind::l..bitcoind::leveldb::log::Writer::AddRecord (5,908,948,808 samples, 
0.17%)bitcoind::leveldb::log::Writer::EmitPhysicalRecord (5,908,948,808 samples, 0.17%)bitcoind::CompressScript (1,030,024,630 samples, 0.03%)bitcoind::prevector<33u, unsigned char, unsigned int, int>::resize (459,767,226 samples, 0.01%)bitcoind::void WriteVarInt<DataStream, (VarIntMode)0, unsigned int> (11,377,276,951 samples, 0.32%)bitcoind::void std::vector<std::byte, zero_after_free_allocator<std::byte> >::_M_range_insert<std::byte const*> (8,938,854,890 samples, 0.25%)bitcoind::CCoinsViewDB::BatchWrite (244,230,597,449 samples, 6.87%)bitcoind::CCo..bitcoind::void std::vector<std::byte, zero_after_free_allocator<std::byte> >::_M_range_insert<std::byte const*> (6,482,431,215 samples, 0.18%)bitcoind::std::_Hashtable<COutPoint, std::pair<COutPoint const, CCoinsCacheEntry>, PoolAllocator<std::pair<COutPoint const, CCoinsCacheEntry>, 144ul, 8ul>, std::__detail::_Select1st, std::equal_to<COutPoint>, SaltedOutpointHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<false, false, true> >::clear (13,761,064,935 samples, 0.39%)bitcoind::void std::vector<std::byte, zero_after_free_allocator<std::byte> >::_M_range_insert<std::byte const*> (364,557,178 samples, 0.01%)bitcoind::CCoinsViewCache::Flush (264,031,161,045 samples, 7.43%)bitcoind::CCoi..libc.so.6::cfree@GLIBC_2.2.5 (5,262,867,110 samples, 0.15%)bitcoind::Chainstate::ForceFlushStateToDisk (264,186,830,154 samples, 7.43%)bitcoind::Chai..bitcoind::Chainstate::FlushStateToDisk (264,186,830,154 samples, 7.43%)bitcoind::Chai..libc.so.6::__libc_start_call_main (265,453,083,455 samples, 7.47%)libc.so.6::__l..bitcoind::main (265,453,083,455 samples, 7.47%)bitcoind::mainbitcoind::Shutdown (265,453,083,455 samples, 7.47%)bitcoind::Shut..libc.so.6::_int_free (2,825,988,487 samples, 0.08%)libc.so.6::malloc_consolidate (2,950,349,980 samples, 0.08%)b-shutoff (278,389,331,208 samples, 7.83%)b-shutofflibc.so.6::unlink_chunk.isra.0 
(3,181,018,445 samples, 0.09%)libc.so.6::_int_malloc (620,560,935 samples, 0.02%)[unknown] (518,649,070 samples, 0.01%)[unknown] (466,591,536 samples, 0.01%)[unknown] (466,591,536 samples, 0.01%)[unknown] (466,591,536 samples, 0.01%)[unknown] (415,625,450 samples, 0.01%)[unknown] (415,625,450 samples, 0.01%)[unknown] (363,215,208 samples, 0.01%)[unknown] (1,501,827,638 samples, 0.04%)bitcoind::leveldb::BlockBuilder::Add (581,064,351 samples, 0.02%)bitcoind::leveldb::TableBuilder::Add (1,003,488,869 samples, 0.03%)bitcoind::leveldb::DBImpl::WriteLevel0Table (1,214,913,728 samples, 0.03%)bitcoind::leveldb::BuildTable (1,214,913,728 samples, 0.03%)bitcoind::leveldb::WriteBatchInternal::InsertInto (2,528,384,688 samples, 0.07%)bitcoind::leveldb::WriteBatch::Iterate (2,528,384,688 samples, 0.07%)bitcoind::leveldb::MemTable::Add (2,422,985,691 samples, 0.07%)bitcoind::leveldb::SkipList<char const*, leveldb::MemTable::KeyComparator>::Insert (2,422,985,691 samples, 0.07%)bitcoind::leveldb::SkipList<char const*, leveldb::MemTable::KeyComparator>::FindGreaterOrEqual (2,318,036,540 samples, 0.07%)bitcoind::leveldb::MemTable::KeyComparator::operator (1,429,299,251 samples, 0.04%)bitcoind::leveldb::InternalKeyComparator::Compare (910,982,229 samples, 0.03%)bitcoind::CDBWrapper::CDBWrapper (5,007,147,537 samples, 0.14%)bitcoind::leveldb::DB::Open (5,007,147,537 samples, 0.14%)bitcoind::leveldb::DBImpl::Recover (4,954,666,055 samples, 0.14%)bitcoind::leveldb::DBImpl::RecoverLogFile (4,954,666,055 samples, 0.14%)libc.so.6::__memmove_avx512_unaligned_erms (1,000,227,273 samples, 0.03%)[unknown] (1,000,227,273 samples, 0.03%)[unknown] (1,000,227,273 samples, 0.03%)[unknown] (947,397,460 samples, 0.03%)[unknown] (947,397,460 samples, 0.03%)[unknown] (947,397,460 samples, 0.03%)[unknown] (841,684,608 samples, 0.02%)[unknown] (841,684,608 samples, 0.02%)[unknown] (841,684,608 samples, 0.02%)[unknown] (788,837,171 samples, 0.02%)bitcoind::node::BlockManager::GetAllBlockIndices 
(356,174,463 samples, 0.01%)bitcoind::base_uint<256u>::operator/= (4,353,340,184 samples, 0.12%)bitcoind::base_uint<256u>::operator>>=(unsigned int) (1,651,178,228 samples, 0.05%)bitcoind::GetBlockProof (4,611,529,418 samples, 0.13%)bitcoind::CSHA256::Finalize (469,172,416 samples, 0.01%)bitcoind::CSHA256::Write (416,395,152 samples, 0.01%)bitcoind::CBlockHeader::GetHash (889,433,319 samples, 0.03%)bitcoind::CSHA256::Write (420,260,903 samples, 0.01%)bitcoind::CheckProofOfWorkImpl (628,054,325 samples, 0.02%)bitcoind::arith_uint256::SetCompact (474,577,125 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::Next (359,545,524 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::DBIter::FindNextUserEntry (615,884,661 samples, 0.02%)bitcoind::std::_Hashtable<uint256, std::pair<uint256 const, CBlockIndex>, std::allocator<std::pair<uint256 const, CBlockIndex> >, std::__detail::_Select1st, std::equal_to<uint256>, BlockHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<true, false, true> >::_M_rehash (412,349,637 samples, 0.01%)bitcoind::node::BlockManager::InsertBlockIndex (926,266,820 samples, 0.03%)bitcoind::std::_Hashtable<uint256, std::pair<uint256 const, CBlockIndex>, std::allocator<std::pair<uint256 const, CBlockIndex> >, std::__detail::_Select1st, std::equal_to<uint256>, BlockHasher, std::__detail::_Mod_range_hashing, std::__detail::_Default_ranged_hash, std::__detail::_Prime_rehash_policy, std::__detail::_Hashtable_traits<true, false, true> >::_M_insert_unique_node (621,546,429 samples, 0.02%)bitcoind::kernel::BlockTreeDB::LoadBlockIndexGuts (4,371,914,721 samples, 0.12%)bitcoind::node::BlockManager::GetAllBlockIndices (360,699,633 samples, 0.01%)bitcoind::void std::__introsort_loop<__gnu_cxx::__normal_iterator<CBlockIndex**, std::vector<CBlockIndex*, std::allocator<CBlockIndex*> > >, long, 
__gnu_cxx::__ops::_Iter_comp_iter<node::CBlockIndexHeightOnlyComparator> > (487,613,426 samples, 0.01%)bitcoind::node::BlockManager::LoadBlockIndexDB (10,756,421,448 samples, 0.30%)bitcoind::node::BlockManager::LoadBlockIndex (10,397,563,911 samples, 0.29%)libc.so.6::__libc_start_call_main (17,915,410,780 samples, 0.50%)bitcoind::main (17,915,410,780 samples, 0.50%)bitcoind::AppInitMain (17,915,410,780 samples, 0.50%)bitcoind::InitAndLoadChainstate (17,915,410,780 samples, 0.50%)bitcoind::node::LoadChainstate (17,915,410,780 samples, 0.50%)bitcoind::node::CompleteChainstateInitialization (17,915,410,780 samples, 0.50%)bitcoind::ChainstateManager::LoadBlockIndex (12,499,349,673 samples, 0.35%)bitcoind::void std::__introsort_loop<__gnu_cxx::__normal_iterator<CBlockIndex**, std::vector<CBlockIndex*, std::allocator<CBlockIndex*> > >, long, __gnu_cxx::__ops::_Iter_comp_iter<node::CBlockIndexHeightOnlyComparator> > (711,414,524 samples, 0.02%)bitcoind::void std::__introsort_loop<__gnu_cxx::__normal_iterator<CBlockIndex**, std::vector<CBlockIndex*, std::allocator<CBlockIndex*> > >, long, __gnu_cxx::__ops::_Iter_comp_iter<node::CBlockIndexHeightOnlyComparator> > (401,238,745 samples, 0.01%)libc.so.6::_int_free (620,663,041 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (868,925,227 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::Valid (404,648,282 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::value (764,967,422 samples, 0.02%)bitcoind::leveldb::Compaction::ShouldStopBefore (811,517,390 samples, 0.02%)bitcoind::leveldb::TableCache::Evict (924,140,736 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::ShardedLRUCache::Erase (924,140,736 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::LRUCache::FinishErase (924,140,736 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::LRUCache::Unref (924,140,736 samples, 0.03%)bitcoind::leveldb::DeleteEntry (924,140,736 
samples, 0.03%)libc.so.6::__munmap (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (924,140,736 samples, 0.03%)[unknown] (622,330,840 samples, 0.02%)[[jbd2]] (579,680,739 samples, 0.02%)bitcoind::leveldb::DBImpl::DeleteObsoleteFiles (5,386,961,700 samples, 0.15%)libc.so.6::__unlink (4,462,820,964 samples, 0.13%)[unknown] (4,462,820,964 samples, 0.13%)[unknown] (4,462,820,964 samples, 0.13%)[unknown] (4,462,820,964 samples, 0.13%)[unknown] (4,462,820,964 samples, 0.13%)[unknown] (4,462,820,964 samples, 0.13%)[[ext4]] (4,462,820,964 samples, 0.13%)[unknown] (4,413,928,808 samples, 0.12%)[unknown] (4,413,928,808 samples, 0.12%)[unknown] (3,629,480,214 samples, 0.10%)[unknown] (2,527,606,876 samples, 0.07%)[unknown] (1,289,801,972 samples, 0.04%)[unknown] (411,890,158 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,451,370,022 samples, 0.04%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (2,100,345,679 samples, 0.06%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::FindSmallest (7,036,670,089 samples, 0.20%)bitcoind::leveldb::InternalKeyComparator::Compare (5,331,785,618 samples, 0.15%)libc.so.6::__memcmp_evex_movbe (467,739,292 samples, 0.01%)bitcoind::leveldb::Block::Iter::ParseNextKey (1,597,295,639 samples, 0.04%)bitcoind::leveldb::Block::Iter::key (719,412,755 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::Next (3,651,719,685 samples, 0.10%)[unknown] (775,514,001 samples, 0.02%)[unknown] (775,514,001 samples, 0.02%)[unknown] (775,514,001 samples, 0.02%)[unknown] (775,514,001 samples, 0.02%)[unknown] (723,468,265 samples, 0.02%)[unknown] (671,854,971 samples, 
0.02%)[unknown] (620,745,631 samples, 0.02%)[unknown] (467,020,775 samples, 0.01%)bitcoind::leveldb::ReadBlock (5,036,746,240 samples, 0.14%)bitcoind::crc32c::ExtendSse42 (4,003,982,142 samples, 0.11%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::InitDataBlock (6,011,618,239 samples, 0.17%)bitcoind::leveldb::Table::BlockReader (5,654,181,527 samples, 0.16%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::SkipEmptyDataBlocksForward (6,370,810,843 samples, 0.18%)bitcoind::leveldb::Block::Iter::Valid (514,863,214 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::Next (11,878,686,935 samples, 0.33%)[unknown] (357,525,803 samples, 0.01%)bitcoind::leveldb::ReadBlock (1,021,671,534 samples, 0.03%)bitcoind::crc32c::ExtendSse42 (664,145,731 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::InitDataBlock (1,177,165,099 samples, 0.03%)bitcoind::leveldb::Table::BlockReader (1,073,054,446 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::SkipEmptyDataBlocksForward (3,944,657,665 samples, 0.11%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::Valid (468,344,432 samples, 0.01%)bitcoind::leveldb::Block::Iter::Valid (360,443,695 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::Next (25,840,019,062 samples, 0.73%)bitcoind::leveldb::InternalKeyComparator::Compare (877,755,927 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::MergingIterator::value (460,966,118 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::TwoLevelIterator::value (1,168,083,499 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,027,703,208 samples, 0.03%)bitcoind::leveldb::Compaction::IsBaseLevelForKey (3,331,453,084 samples, 0.09%)libc.so.6::__memcmp_evex_movbe (1,380,364,868 samples, 0.04%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,428,625,135 samples, 
0.04%)bitcoind::leveldb::Compaction::ShouldStopBefore (5,019,787,360 samples, 0.14%)bitcoind::leveldb::InternalKeyComparator::Compare (3,376,359,370 samples, 0.09%)libc.so.6::__memcmp_evex_movbe (1,229,056,330 samples, 0.03%)bitcoind::leveldb::DBImpl::DeleteObsoleteFiles (947,024,277 samples, 0.03%)libc.so.6::__unlink (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[[ext4]] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (947,024,277 samples, 0.03%)[unknown] (891,341,341 samples, 0.03%)[unknown] (632,138,490 samples, 0.02%)[unknown] (416,723,130 samples, 0.01%)bitcoind::leveldb::MemTableIterator::key (1,087,232,643 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (7,618,678,897 samples, 0.21%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (3,626,299,706 samples, 0.10%)bitcoind::leveldb::InternalKeyComparator::Compare (5,542,426,443 samples, 0.16%)bitcoind::leveldb::PutVarint32 (983,352,417 samples, 0.03%)bitcoind::leveldb::EncodeVarint32 (516,604,326 samples, 0.01%)bitcoind::leveldb::BlockBuilder::Add (15,702,002,539 samples, 0.44%)bitcoind::leveldb::FilterBlockBuilder::AddKey (412,090,761 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BloomFilterPolicy::CreateFilter (2,066,056,339 samples, 0.06%)bitcoind::leveldb::Hash (665,922,831 samples, 0.02%)bitcoind::leveldb::FilterBlockBuilder::StartBlock (2,377,127,267 samples, 0.07%)bitcoind::leveldb::FilterBlockBuilder::GenerateFilter (2,377,127,267 samples, 0.07%)bitcoind::leveldb::InternalKeyComparator::Compare (774,664,618 samples, 0.02%)[[ext4]] (567,830,671 samples, 0.02%)[[ext4]] (929,740,986 samples, 0.03%)[unknown] (361,910,315 samples, 0.01%)[[ext4]] (3,216,007,087 samples, 0.09%)[unknown] (2,077,722,358 samples, 0.06%)[unknown] 
(2,025,638,088 samples, 0.06%)[unknown] (1,766,421,841 samples, 0.05%)[unknown] (1,349,297,830 samples, 0.04%)[unknown] (985,540,031 samples, 0.03%)[[ext4]] (4,245,378,964 samples, 0.12%)[unknown] (4,245,378,964 samples, 0.12%)[unknown] (821,567,389 samples, 0.02%)bitcoind::leveldb::TableBuilder::Flush (5,177,109,910 samples, 0.15%)libc.so.6::__GI___libc_write (4,762,579,653 samples, 0.13%)[unknown] (4,762,579,653 samples, 0.13%)[unknown] (4,762,579,653 samples, 0.13%)[unknown] (4,762,579,653 samples, 0.13%)[unknown] (4,607,316,631 samples, 0.13%)libc.so.6::__memcmp_evex_movbe (2,327,620,616 samples, 0.07%)bitcoind::leveldb::TableBuilder::Add (29,098,360,859 samples, 0.82%)libc.so.6::__memmove_avx512_unaligned_erms (880,376,005 samples, 0.02%)[[ext4]] (576,641,035 samples, 0.02%)[unknown] (419,245,830 samples, 0.01%)[[ext4]] (681,686,302 samples, 0.02%)[[ext4]] (886,736,982 samples, 0.02%)[[ext4]] (886,736,982 samples, 0.02%)[[ext4]] (886,736,982 samples, 0.02%)bitcoind::leveldb::BuildTable (31,594,879,610 samples, 0.89%)libc.so.6::fdatasync (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[[ext4]] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)[unknown] (990,237,376 samples, 0.03%)bitcoind::leveldb::DBImpl::CompactMemTable (32,644,397,020 samples, 0.92%)bitcoind::leveldb::DBImpl::WriteLevel0Table (31,697,372,743 samples, 0.89%)[[ext4]] (360,420,776 samples, 0.01%)bitcoind::leveldb::TableBuilder::Finish (565,702,739 samples, 0.02%)bitcoind::leveldb::TableBuilder::WriteRawBlock (411,712,919 samples, 0.01%)libc.so.6::__GI___libc_write (411,712,919 samples, 0.01%)[unknown] (411,712,919 samples, 0.01%)[unknown] (411,712,919 samples, 0.01%)[unknown] (411,712,919 samples, 0.01%)[unknown] (411,712,919 samples, 0.01%)[[ext4]] (411,712,919 samples, 0.01%)[unknown] (411,712,919 samples, 
0.01%)[[ext4]] (2,407,378,967 samples, 0.07%)[unknown] (1,896,402,811 samples, 0.05%)[unknown] (1,223,588,483 samples, 0.03%)[unknown] (359,102,837 samples, 0.01%)[[ext4]] (3,282,391,421 samples, 0.09%)[unknown] (669,408,205 samples, 0.02%)[[nvme]] (410,427,902 samples, 0.01%)[[nvme]] (410,427,902 samples, 0.01%)[unknown] (410,427,902 samples, 0.01%)[unknown] (410,427,902 samples, 0.01%)[[ext4]] (5,846,551,102 samples, 0.16%)[unknown] (1,539,251,741 samples, 0.04%)[unknown] (1,332,958,992 samples, 0.04%)[unknown] (1,230,554,197 samples, 0.03%)[unknown] (1,230,554,197 samples, 0.03%)[unknown] (1,230,554,197 samples, 0.03%)[unknown] (410,593,098 samples, 0.01%)[[ext4]] (5,999,990,575 samples, 0.17%)[[ext4]] (5,999,990,575 samples, 0.17%)bitcoind::leveldb::DBImpl::FinishCompactionOutputFile (7,646,287,561 samples, 0.22%)libc.so.6::fdatasync (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[[ext4]] (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[unknown] (6,926,959,748 samples, 0.19%)[unknown] (6,720,904,548 samples, 0.19%)[unknown] (618,546,651 samples, 0.02%)[unknown] (618,541,816 samples, 0.02%)[unknown] (618,541,816 samples, 0.02%)[unknown] (618,541,816 samples, 0.02%)[unknown] (513,838,124 samples, 0.01%)[unknown] (411,261,494 samples, 0.01%)bitcoind::leveldb::InternalKeyComparator::Compare (1,176,215,358 samples, 0.03%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,597,987,748 samples, 0.04%)bitcoind::leveldb::EncodeVarint32 (667,083,479 samples, 0.02%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (3,023,925,193 samples, 0.09%)bitcoind::leveldb::InternalKeyComparator::Compare (5,485,200,607 samples, 0.15%)libc.so.6::__memcmp_evex_movbe (768,462,744 samples, 0.02%)bitcoind::leveldb::BlockBuilder::Add (19,355,464,658 samples, 
0.54%)bitcoind::leveldb::PutVarint32 (3,963,072,776 samples, 0.11%)bitcoind::leveldb::EncodeVarint32 (2,006,933,285 samples, 0.06%)bitcoind::leveldb::FilterBlockBuilder::AddKey (1,861,448,821 samples, 0.05%)bitcoind::leveldb::(anonymous namespace)::BloomFilterPolicy::CreateFilter (13,758,298,035 samples, 0.39%)bitcoind::leveldb::Hash (5,062,387,301 samples, 0.14%)bitcoind::leveldb::InternalFilterPolicy::CreateFilter (408,507,196 samples, 0.01%)bitcoind::std::vector<leveldb::Slice, std::allocator<leveldb::Slice> >::_M_default_append (1,029,970,476 samples, 0.03%)bitcoind::leveldb::FilterBlockBuilder::GenerateFilter (16,416,940,319 samples, 0.46%)bitcoind::leveldb::FilterBlockBuilder::StartBlock (16,468,035,714 samples, 0.46%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,040,462,681 samples, 0.03%)bitcoind::leveldb::InternalKeyComparator::Compare (2,329,875,977 samples, 0.07%)bitcoind::leveldb::InternalKeyComparator::FindShortestSeparator (358,735,789 samples, 0.01%)bitcoind::leveldb::PutVarint32 (610,801,466 samples, 0.02%)bitcoind::crc32c::ExtendSse42 (874,382,210 samples, 0.02%)bitcoind::leveldb::TableBuilder::WriteBlock (1,806,524,733 samples, 0.05%)bitcoind::leveldb::TableBuilder::WriteRawBlock (1,390,163,236 samples, 0.04%)libc.so.6::__memmove_avx512_unaligned_erms (413,005,584 samples, 0.01%)[[ext4]] (720,896,427 samples, 0.02%)[[ext4]] (2,836,852,977 samples, 0.08%)[unknown] (1,356,279,497 samples, 0.04%)[[ext4]] (3,876,087,820 samples, 0.11%)[unknown] (634,447,162 samples, 0.02%)[[ext4]] (6,595,884,839 samples, 0.19%)[unknown] (2,409,927,037 samples, 0.07%)[unknown] (2,152,146,763 samples, 0.06%)[unknown] (1,946,544,284 samples, 0.05%)[unknown] (1,691,057,617 samples, 0.05%)[unknown] (1,332,315,567 samples, 0.04%)[unknown] (618,194,201 samples, 0.02%)[unknown] (411,783,313 samples, 0.01%)[[ext4]] (21,402,165,352 samples, 0.60%)[unknown] (13,825,328,165 samples, 0.39%)[unknown] (12,948,506,018 samples, 0.36%)[unknown] 
(10,591,496,268 samples, 0.30%)[unknown] (8,635,293,060 samples, 0.24%)[unknown] (5,512,816,463 samples, 0.16%)[unknown] (1,755,230,935 samples, 0.05%)[unknown] (358,610,982 samples, 0.01%)[[ext4]] (26,848,872,865 samples, 0.76%)[unknown] (26,183,441,807 samples, 0.74%)[unknown] (3,805,768,350 samples, 0.11%)[unknown] (2,522,380,066 samples, 0.07%)libc.so.6::__GI___libc_write (29,870,807,469 samples, 0.84%)[unknown] (29,663,737,328 samples, 0.83%)[unknown] (29,456,391,053 samples, 0.83%)[unknown] (29,306,607,963 samples, 0.82%)[unknown] (28,793,621,717 samples, 0.81%)[unknown] (869,287,921 samples, 0.02%)bitcoind::leveldb::TableBuilder::Flush (32,039,566,359 samples, 0.90%)bitcoind::leveldb::TableBuilder::status (2,416,608,293 samples, 0.07%)bitcoind::memcpy@plt (1,533,086,169 samples, 0.04%)libc.so.6::__memcmp_evex_movbe (11,663,095,994 samples, 0.33%)libc.so.6::__memmove_avx512_unaligned_erms (6,084,682,703 samples, 0.17%)bitcoind::leveldb::TableBuilder::Add (101,316,031,082 samples, 2.85%)bitc..bitcoind::leveldb::TableBuilder::NumEntries (460,667,349 samples, 0.01%)libc.so.6::__memcmp_evex_movbe (359,824,779 samples, 0.01%)bitcoind::leveldb::DBImpl::DoCompactionWork (188,768,693,249 samples, 5.31%)bitcoind:..libc.so.6::__memmove_avx512_unaligned_erms (972,425,560 samples, 0.03%)bitcoind::leveldb::TableBuilder::NumEntries (767,314,029 samples, 0.02%)bitcoind::leveldb::DBImpl::BackgroundCompaction (198,697,568,504 samples, 5.59%)bitcoind::..libc.so.6::__memmove_avx512_unaligned_erms (569,144,596 samples, 0.02%)bitcoind::leveldb::DBImpl::DeleteObsoleteFiles (591,819,871 samples, 0.02%)libc.so.6::__unlink (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[[ext4]] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (591,819,871 samples, 0.02%)[unknown] (479,954,726 samples, 
0.01%)[unknown] (428,868,095 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (4,760,717,074 samples, 0.13%)bitcoind::leveldb::(anonymous namespace)::BytewiseComparatorImpl::Compare (1,338,954,347 samples, 0.04%)bitcoind::leveldb::InternalKeyComparator::Compare (2,113,914,207 samples, 0.06%)bitcoind::leveldb::BlockBuilder::Add (8,483,080,141 samples, 0.24%)bitcoind::leveldb::PutVarint32 (468,110,226 samples, 0.01%)bitcoind::leveldb::(anonymous namespace)::BloomFilterPolicy::CreateFilter (981,556,026 samples, 0.03%)bitcoind::leveldb::Hash (364,078,664 samples, 0.01%)bitcoind::leveldb::FilterBlockBuilder::StartBlock (1,085,605,353 samples, 0.03%)bitcoind::leveldb::FilterBlockBuilder::GenerateFilter (1,085,605,353 samples, 0.03%)[[ext4]] (363,216,075 samples, 0.01%)[[ext4]] (414,434,148 samples, 0.01%)[[ext4]] (622,462,403 samples, 0.02%)[[ext4]] (2,219,690,360 samples, 0.06%)[unknown] (1,545,848,943 samples, 0.04%)[unknown] (1,545,848,943 samples, 0.04%)[unknown] (1,344,249,592 samples, 0.04%)[unknown] (1,034,709,836 samples, 0.03%)[unknown] (463,122,475 samples, 0.01%)[[ext4]] (2,730,864,687 samples, 0.08%)[unknown] (2,627,509,960 samples, 0.07%)bitcoind::leveldb::TableBuilder::Flush (2,941,422,377 samples, 0.08%)libc.so.6::__GI___libc_write (2,889,358,538 samples, 0.08%)[unknown] (2,889,358,538 samples, 0.08%)[unknown] (2,837,160,085 samples, 0.08%)[unknown] (2,837,160,085 samples, 0.08%)[unknown] (2,837,160,085 samples, 0.08%)libc.so.6::__memcmp_evex_movbe (870,026,684 samples, 0.02%)bitcoind::leveldb::TableBuilder::Add (14,671,945,001 samples, 0.41%)libc.so.6::__memmove_avx512_unaligned_erms (516,334,186 samples, 0.01%)[[ext4]] (366,285,823 samples, 0.01%)bitcoind::leveldb::BuildTable (15,764,968,843 samples, 0.44%)libc.so.6::fdatasync (522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[[ext4]] (522,804,809 samples, 0.01%)[unknown] 
(522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[unknown] (522,804,809 samples, 0.01%)[[ext4]] (470,717,222 samples, 0.01%)[[ext4]] (470,717,222 samples, 0.01%)[[ext4]] (470,717,222 samples, 0.01%)libstdc++.so.6.0.32::execute_native_thread_routine (215,158,735,915 samples, 6.05%)libstdc++.s..bitcoind::leveldb::(anonymous namespace)::PosixEnv::BackgroundThreadEntryPoint (215,158,735,915 samples, 6.05%)bitcoind::l..bitcoind::leveldb::DBImpl::BackgroundCall (215,158,735,915 samples, 6.05%)bitcoind::l..bitcoind::leveldb::DBImpl::CompactMemTable (16,461,167,411 samples, 0.46%)bitcoind::leveldb::DBImpl::WriteLevel0Table (15,869,347,540 samples, 0.45%)bitcoind (236,278,709,104 samples, 6.65%)bitcoindall (3,555,551,407,309 samples, 100%) diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000000..fc1308c520fa --- /dev/null +++ b/flake.lock @@ -0,0 +1,27 @@ +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1764983851, + "narHash": "sha256-y7RPKl/jJ/KAP/VKLMghMgXTlvNIJMHKskl8/Uuar7o=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "d9bc5c7dceb30d8d6fafa10aeb6aa8a48c218454", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-25.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "nixpkgs": "nixpkgs" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000000..b42180629d1a --- /dev/null +++ b/flake.nix @@ -0,0 +1,170 @@ +{ + description = "bitcoind for benchmarking"; + + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11"; + + outputs = + { self, nixpkgs }: + let + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; + + forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); + + pkgsFor = system: import nixpkgs { inherit system; }; + + mkBitcoinCore = + system: + let + pkgs = pkgsFor system; + inherit (pkgs) lib; + + pname = "bitcoin-core"; + version = 
self.shortRev or "dirty"; + + CFlags = toString [ + "-O2" + "-g" + ]; + CXXFlags = "${CFlags} -fno-omit-frame-pointer"; + + nativeBuildInputs = [ + pkgs.cmake + pkgs.ninja + pkgs.pkg-config + pkgs.python3 + ]; + + buildInputs = [ + pkgs.boost188.dev + pkgs.libevent.dev + ]; + + cmakeFlags = [ + "-DBUILD_BENCH=OFF" + "-DBUILD_BITCOIN_BIN=OFF" + "-DBUILD_CLI=OFF" + "-DBUILD_DAEMON=ON" + "-DBUILD_FUZZ_BINARY=OFF" + "-DBUILD_GUI_TESTS=OFF" + "-DBUILD_TESTS=OFF" + "-DBUILD_TX=OFF" + "-DBUILD_UTIL=OFF" + "-DBUILD_WALLET_TOOL=OFF" + "-DCMAKE_BUILD_TYPE=RelWithDebInfo" + "-DCMAKE_SKIP_RPATH=ON" + "-DENABLE_EXTERNAL_SIGNER=OFF" + "-DENABLE_IPC=OFF" + "-DENABLE_WALLET=OFF" + "-DREDUCE_EXPORTS=ON" + "-DWITH_ZMQ=OFF" + ]; + in + pkgs.stdenv.mkDerivation { + inherit + pname + version + nativeBuildInputs + buildInputs + cmakeFlags + ; + + preConfigure = '' + cmakeFlagsArray+=( + "-DAPPEND_CFLAGS=${CFlags}" + "-DAPPEND_CXXFLAGS=${CXXFlags}" + "-DAPPEND_LDFLAGS=-Wl,--as-needed -Wl,-O2" + ) + ''; + + src = builtins.path { + path = ./.; + name = "source"; + }; + + env = { + CMAKE_GENERATOR = "Ninja"; + LC_ALL = "C"; + LIBRARY_PATH = ""; + CPATH = ""; + C_INCLUDE_PATH = ""; + CPLUS_INCLUDE_PATH = ""; + OBJC_INCLUDE_PATH = ""; + OBJCPLUS_INCLUDE_PATH = ""; + }; + + dontStrip = true; + + meta = { + description = "bitcoind for benchmarking"; + homepage = "https://bitcoincore.org/"; + license = lib.licenses.mit; + }; + }; + in + { + packages = forAllSystems (system: { + default = mkBitcoinCore system; + }); + + formatter = forAllSystems (system: (pkgsFor system).nixfmt-tree); + + devShells = forAllSystems ( + system: + let + pkgs = pkgsFor system; + inherit (pkgs) stdenv; + + # Override the default cargo-flamegraph with a custom fork including bitcoin highlighting + cargo-flamegraph = pkgs.rustPlatform.buildRustPackage rec { + pname = "flamegraph"; + version = "bitcoin-core"; + + src = pkgs.fetchFromGitHub { + owner = "willcl-ark"; + repo = "flamegraph"; + rev = "bitcoin-core"; + sha256 
= "sha256-tQbr3MYfAiOxeT12V9au5KQK5X5JeGuV6p8GR/Sgen4="; + }; + + doCheck = false; + cargoHash = "sha256-QWPqTyTFSZNJNayNqLmsQSu0rX26XBKfdLROZ9tRjrg="; + + nativeBuildInputs = pkgs.lib.optionals stdenv.hostPlatform.isLinux [ pkgs.makeWrapper ]; + buildInputs = pkgs.lib.optionals stdenv.hostPlatform.isDarwin [ + pkgs.darwin.apple_sdk.frameworks.Security + ]; + + postFixup = pkgs.lib.optionalString stdenv.hostPlatform.isLinux '' + wrapProgram $out/bin/cargo-flamegraph \ + --set-default PERF ${pkgs.perf}/bin/perf + wrapProgram $out/bin/flamegraph \ + --set-default PERF ${pkgs.perf}/bin/perf + ''; + }; + in + { + default = pkgs.mkShell { + buildInputs = [ + # Benchmarking + cargo-flamegraph + pkgs.flamegraph + pkgs.hyperfine + pkgs.jq + pkgs.just + pkgs.perf + pkgs.perf-tools + pkgs.python312 + pkgs.python312Packages.matplotlib + pkgs.util-linux + + # Binary patching + pkgs.patchelf + ]; + }; + } + ); + }; +} diff --git a/justfile b/justfile new file mode 100644 index 000000000000..d128c7e8b195 --- /dev/null +++ b/justfile @@ -0,0 +1,115 @@ +set shell := ["bash", "-uc"] + +default: + just --list + +# ============================================================================ +# Local benchmarking commands +# ============================================================================ + +# Test instrumented run using signet (includes report generation) +[group('local')] +test-instrumented base head datadir: + nix develop --command python3 bench.py build --skip-existing {{ base }}:base {{ head }}:head + nix develop --command python3 bench.py --profile quick run \ + --chain signet \ + --instrumented \ + --datadir {{ datadir }} \ + base:./binaries/base/bitcoind \ + head:./binaries/head/bitcoind + nix develop --command python3 bench.py report bench-output/ bench-output/ + +# Test uninstrumented run using signet +[group('local')] +test-uninstrumented base head datadir: + nix develop --command python3 bench.py build --skip-existing {{ base }}:base {{ head }}:head + nix 
develop --command python3 bench.py --profile quick run \ + --chain signet \ + --datadir {{ datadir }} \ + base:./binaries/base/bitcoind \ + head:./binaries/head/bitcoind + +# Full benchmark with instrumentation (flamegraphs + plots) +[group('local')] +instrumented base head datadir: + python3 bench.py build {{ base }}:base {{ head }}:head + python3 bench.py --profile quick run \ + --instrumented \ + --datadir {{ datadir }} \ + base:./binaries/base/bitcoind \ + head:./binaries/head/bitcoind + +# Just build binaries (useful for incremental testing) +[group('local')] +build *commits: + python3 bench.py build {{ commits }} + +# Run benchmark with pre-built binaries +[group('local')] +run datadir *binaries: + python3 bench.py run --datadir {{ datadir }} {{ binaries }} + +# Generate plots from a debug.log file +[group('local')] +analyze commit logfile output_dir="./plots": + python3 bench.py analyze {{ commit }} {{ logfile }} --output-dir {{ output_dir }} + +# Compare benchmark results +[group('local')] +compare *results_files: + python3 bench.py compare {{ results_files }} + +# Generate HTML report from benchmark results +[group('local')] +report input_dir output_dir: + python3 bench.py report {{ input_dir }} {{ output_dir }} + +# ============================================================================ +# CI commands (called by GitHub Actions) +# ============================================================================ + +# Build binaries for CI +[group('ci')] +ci-build base_commit head_commit binaries_dir: + python3 bench.py build -o {{ binaries_dir }} {{ base_commit }}:base {{ head_commit }}:head + +# Run uninstrumented benchmarks for CI +[group('ci')] +ci-run datadir tmp_datadir output_dir dbcache binaries_dir: + python3 bench.py --profile ci run \ + --datadir {{ datadir }} \ + --tmp-datadir {{ tmp_datadir }} \ + --output-dir {{ output_dir }} \ + --dbcache {{ dbcache }} \ + base:{{ binaries_dir }}/base/bitcoind \ + head:{{ binaries_dir }}/head/bitcoind + +# 
Run instrumented benchmarks for CI +[group('ci')] +ci-run-instrumented datadir tmp_datadir output_dir dbcache binaries_dir: + python3 bench.py --profile ci run \ + --instrumented \ + --datadir {{ datadir }} \ + --tmp-datadir {{ tmp_datadir }} \ + --output-dir {{ output_dir }} \ + --dbcache {{ dbcache }} \ + base:{{ binaries_dir }}/base/bitcoind \ + head:{{ binaries_dir }}/head/bitcoind + +# ============================================================================ +# Git helpers +# ============================================================================ + +# Cherry-pick commits from a Bitcoin Core PR onto this branch +[group('git')] +pick-pr pr_number: + #!/usr/bin/env bash + set -euxo pipefail + + if ! git remote get-url upstream 2>/dev/null | grep -q "bitcoin/bitcoin"; then + echo "Error: 'upstream' remote not found or doesn't point to bitcoin/bitcoin" + echo "Please add it with: git remote add upstream https://github.com/bitcoin/bitcoin.git" + exit 1 + fi + + git fetch upstream pull/{{ pr_number }}/head:bench-{{ pr_number }} && git cherry-pick $(git rev-list --reverse bench-{{ pr_number }} --not upstream/master) From 8ad21e90e8088fc45d1df32ba82284af81532a83 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 7 Jan 2026 09:50:53 +0000 Subject: [PATCH 02/46] don't compare to master in prs --- .github/workflows/benchmark.yml | 29 +-- .github/workflows/publish-results.yml | 49 +++- bench.py | 22 +- bench/configs/pr.toml | 8 +- bench/nightly.py | 144 ++++++++++++ bench/report.py | 311 +++++++++++++++++++------- 6 files changed, 439 insertions(+), 124 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d276bf2e59b8..9d8693f91754 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -5,26 +5,21 @@ on: - master jobs: - build-binaries: + build-binary: runs-on: [self-hosted, linux, x64] - env: - BASE_SHA: ${{ github.event.pull_request.base.sha }} steps: - name: Checkout repo uses: 
actions/checkout@v4 with: fetch-depth: 1 - - name: Fetch base commit - run: | - echo "HEAD_SHA=$(git rev-parse HEAD)" >> "$GITHUB_ENV" - git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }} - - - name: Build both binaries + - name: Build PR binary run: | + HEAD_SHA=$(git rev-parse HEAD) + echo "HEAD_SHA=${HEAD_SHA}" >> "$GITHUB_ENV" nix develop --command python3 bench.py build \ -o ${{ runner.temp }}/binaries \ - $BASE_SHA:base $HEAD_SHA:head + ${HEAD_SHA}:pr - name: Upload binaries uses: actions/upload-artifact@v4 @@ -33,7 +28,7 @@ jobs: path: ${{ runner.temp }}/binaries/ benchmark: - needs: build-binaries + needs: build-binary strategy: matrix: # Matrix entries from configs/pr.toml: dbcache=[450,32000] x instrumented=[false,true] @@ -42,7 +37,6 @@ jobs: timeout-minutes: 600 env: ORIGINAL_DATADIR: /data/pruned-840k - BASE_SHA: ${{ github.event.pull_request.base.sha }} steps: - name: Checkout repo uses: actions/checkout@v4 @@ -57,13 +51,7 @@ jobs: - name: Set binary permissions run: | - chmod +x ${{ runner.temp }}/binaries/base/bitcoind - chmod +x ${{ runner.temp }}/binaries/head/bitcoind - - - name: Fetch base commit - run: | - echo "HEAD_SHA=$(git rev-parse HEAD)" >> "$GITHUB_ENV" - git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }} + chmod +x ${{ runner.temp }}/binaries/pr/bitcoind - name: Run benchmark run: | @@ -73,8 +61,7 @@ jobs: --datadir $ORIGINAL_DATADIR \ --tmp-datadir ${{ runner.temp }}/datadir \ --output-dir ${{ runner.temp }}/output \ - base:${{ runner.temp }}/binaries/base/bitcoind \ - head:${{ runner.temp }}/binaries/head/bitcoind + pr:${{ runner.temp }}/binaries/pr/bitcoind - name: Upload results uses: actions/upload-artifact@v4 diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index cde65f34a456..e713cb3a57af 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -15,7 +15,7 @@ jobs: # Matrix entries from configs/pr.toml: 
dbcache=[450,32000] x instrumented=[false,true] NETWORKS: "450-true,32000-true,450-false,32000-false" outputs: - speedups: ${{ steps.generate.outputs.speedups }} + comparison: ${{ steps.generate.outputs.comparison }} pr-number: ${{ steps.metadata.outputs.pr-number }} result-url: ${{ steps.generate.outputs.result-url }} steps: @@ -65,14 +65,16 @@ jobs: - name: Extract metadata id: metadata run: | - # Find PR number and run ID from any available metadata + # Find PR number, run ID, and commit from any available metadata for network in ${NETWORKS//,/ }; do if [ -f "${network}-metadata/github.json" ]; then PR_NUMBER=$(jq -r '.event.pull_request.number // "main"' "${network}-metadata/github.json") RUN_ID=$(jq -r '.run_id' "${network}-metadata/github.json") + HEAD_SHA=$(jq -r '.event.pull_request.head.sha // .sha' "${network}-metadata/github.json") echo "pr-number=${PR_NUMBER}" >> $GITHUB_OUTPUT echo "run-id=${RUN_ID}" >> $GITHUB_OUTPUT - echo "Found metadata: PR=${PR_NUMBER}, Run=${RUN_ID}" + echo "head-sha=${HEAD_SHA}" >> $GITHUB_OUTPUT + echo "Found metadata: PR=${PR_NUMBER}, Run=${RUN_ID}, Commit=${HEAD_SHA}" break fi done @@ -87,6 +89,7 @@ jobs: env: PR_NUMBER: ${{ steps.metadata.outputs.pr-number }} RUN_ID: ${{ steps.metadata.outputs.run-id }} + HEAD_SHA: ${{ steps.metadata.outputs.head-sha }} run: | cd benchcoin-tools @@ -98,17 +101,40 @@ jobs: fi done - # Generate report + # Generate report with nightly comparison python3 bench.py report \ ${NETWORK_ARGS} \ --pr-number "${PR_NUMBER}" \ --run-id "${RUN_ID}" \ + --commit "${HEAD_SHA}" \ + --nightly-history ../nightly-history.json \ --update-index \ "../results/pr-${PR_NUMBER}/${RUN_ID}" - # Read speedups from generated results.json (filter for uninstrumented runs: *-false) - SPEEDUPS=$(jq -r '.speedups | to_entries | map(select(.key | endswith("-false"))) | map("\(.key): \(.value)%") | join(", ")' "../results/pr-${PR_NUMBER}/${RUN_ID}/results.json") - echo "speedups=${SPEEDUPS}" >> $GITHUB_OUTPUT + # Build 
comparison summary for PR comment + if [ -f "../nightly-history.json" ]; then + COMPARISON=$(jq -r ' + if .nightly_comparison then + .nightly_comparison | to_entries | map( + "\(.key) MB: \(.value.pr_mean / 60 | floor) min" + + if .value.nightly_mean then + " (nightly: \(.value.nightly_mean / 60 | floor) min, \(.value.nightly_date)) β†’ " + + if .value.speedup_percent > 0 then "+\(.value.speedup_percent)% faster" + elif .value.speedup_percent < 0 then "\(.value.speedup_percent)% slower" + else "same" + end + else " (no nightly baseline)" + end + ) | join("\n- ") + else "No comparison data available" + end + ' "../results/pr-${PR_NUMBER}/${RUN_ID}/results.json") + else + COMPARISON="No nightly history available for comparison" + fi + echo "comparison<> $GITHUB_OUTPUT + echo "${COMPARISON}" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT RESULT_URL="https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/results/pr-${PR_NUMBER}/${RUN_ID}/index.html" echo "result-url=${RESULT_URL}" >> $GITHUB_OUTPUT @@ -142,5 +168,10 @@ jobs: run: | gh pr comment ${{ needs.build.outputs.pr-number }} \ --repo ${{ github.repository }} \ - --body "πŸ“Š Benchmark results for this run (${{ github.event.workflow_run.id }}) will be available at: ${{ needs.build.outputs.result-url }} after the github pages \"build and deployment\" action has completed. 
- πŸš€ Speedups: ${{ needs.build.outputs.speedups }}" + --body "## Benchmark Results + + **Comparison to nightly master:** + - ${{ needs.build.outputs.comparison }} + + [View detailed results](${{ needs.build.outputs.result-url }}) + [View nightly trend chart](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/)" diff --git a/bench.py b/bench.py index 2c3b9240e768..b64b89cc316b 100755 --- a/bench.py +++ b/bench.py @@ -275,7 +275,8 @@ def cmd_report(args: argparse.Namespace) -> int: logging.getLogger().setLevel(logging.DEBUG) output_dir = Path(args.output_dir) - phase = ReportPhase() + nightly_history_file = Path(args.nightly_history) if args.nightly_history else None + phase = ReportPhase(nightly_history_file=nightly_history_file) try: # CI multi-network mode @@ -300,6 +301,7 @@ def cmd_report(args: argparse.Namespace) -> int: title=args.title or "Benchmark Results", pr_number=args.pr_number, run_id=args.run_id, + commit=args.commit, ) # Update results index if we have a results directory @@ -323,12 +325,12 @@ def cmd_report(args: argparse.Namespace) -> int: title=args.title or "Benchmark Results", ) - # Print speedups + # Print nightly comparison (speedups vs nightly) if result.speedups: - logger.info("Speedups:") - for network, speedup in result.speedups.items(): + logger.info("Comparison to nightly:") + for config, speedup in result.speedups.items(): sign = "+" if speedup > 0 else "" - logger.info(f" {network}: {sign}{speedup}%") + logger.info(f" {config}: {sign}{speedup}%") return 0 except Exception as e: @@ -569,6 +571,16 @@ def main() -> int: action="store_true", help="Update main index.html (for CI reports)", ) + report_parser.add_argument( + "--nightly-history", + metavar="PATH", + help="Path to nightly-history.json for comparison against nightly baseline", + ) + report_parser.add_argument( + "--commit", + metavar="SHA", + help="PR commit hash (for chart display)", + ) report_parser.set_defaults(func=cmd_report) # Nightly 
command diff --git a/bench/configs/pr.toml b/bench/configs/pr.toml index f154d1a7b468..628d885c4cc6 100644 --- a/bench/configs/pr.toml +++ b/bench/configs/pr.toml @@ -1,17 +1,17 @@ -# PR benchmark configuration (base vs head comparison) +# PR benchmark configuration # Clone benchcoin + use this config = reproduce the benchmark # # Usage: # bench.py run --benchmark-config bench/configs/pr.toml --matrix-entry 450-false \ # --datadir /data/pruned-840k --output-dir ./output \ -# base:/path/to/base/bitcoind head:/path/to/head/bitcoind +# pr:/path/to/pr/bitcoind [benchmark] start_height = 840000 -runs = 3 +runs = 2 [bitcoind] -stopatheight = 855000 +stopatheight = 900000 chain = "main" connect = "148.251.128.115:33333" prune = 10000 diff --git a/bench/nightly.py b/bench/nightly.py index 947076241af1..15367aed7f6b 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -333,6 +333,25 @@ def append(self, result: NightlyResult) -> None: f"Appended result: {result.date} {result.commit[:8]} {result.config} {result.mean:.1f}s" ) + def get_latest(self, config: str) -> NightlyResult | None: + """Get the most recent result for a given config. + + Args: + config: Config name (e.g., '450', '32000') + + Returns: + Most recent NightlyResult for that config, or None if not found + """ + matching = [r for r in self.results if r.config == config] + if not matching: + return None + # Results are sorted by date, so last one is most recent + return matching[-1] + + def get_chart_data(self) -> list[dict]: + """Get results in format suitable for chart embedding.""" + return [r.to_dict() for r in self.results] + def append_from_results_json( self, results_file: Path, @@ -400,6 +419,131 @@ def generate_nightly_chart(history: NightlyHistory, output_file: Path) -> None: logger.info(f"Generated nightly chart: {output_file}") +# HTML/JS snippet for PR comparison chart (embedded in report) +PR_CHART_SNIPPET = """ +
    + + +""" + + +def generate_pr_chart_snippet( + history: NightlyHistory, + pr_results: list[dict], +) -> str: + """Generate HTML/JS snippet for PR comparison chart. + + Args: + history: NightlyHistory with nightly results + pr_results: List of PR result dicts with keys: config, mean, stddev, commit, date + + Returns: + HTML string to embed in report + """ + nightly_data = json.dumps(history.get_chart_data()) + pr_data = json.dumps(pr_results) + + return PR_CHART_SNIPPET.format(nightly_data=nightly_data, pr_data=pr_data) + + class NightlyPhase: """CLI interface for nightly benchmark operations.""" diff --git a/bench/report.py b/bench/report.py index 0a3b07348688..785a77f40fa4 100644 --- a/bench/report.py +++ b/bench/report.py @@ -10,8 +10,12 @@ import re import shutil from dataclasses import dataclass, field +from datetime import date from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from bench.nightly import NightlyHistory logger = logging.getLogger(__name__) @@ -34,8 +38,7 @@ - - + @@ -48,21 +51,8 @@
    NetworkCommandConfig Mean (s) Std Dev User (s)
    - -

    Speedup Summary

    -
    - - - - - - - - - {speedup_rows} - -
    NetworkSpeedup (%)
    -
    + + {nightly_section} {graphs_section} @@ -118,9 +108,12 @@ class ReportGenerator: """Generate HTML reports from benchmark results.""" def __init__( - self, repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin" + self, + repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin", + nightly_history: NightlyHistory | None = None, ): self.repo_url = repo_url + self.nightly_history = nightly_history def generate_multi_network( self, @@ -129,6 +122,7 @@ def generate_multi_network( title: str = "Benchmark Results", pr_number: str | None = None, run_id: str | None = None, + commit: str | None = None, ) -> ReportResult: """Generate HTML report from multiple network benchmark results. @@ -138,6 +132,7 @@ def generate_multi_network( title: Title for the report pr_number: PR number (for CI reports) run_id: Run ID (for CI reports) + commit: Commit hash for PR (used in chart) Returns: ReportResult with paths and speedup data @@ -177,8 +172,8 @@ def generate_multi_network( if not all_runs: raise ValueError("No benchmark results found in any network directory") - # Calculate speedups per network - speedups = self._calculate_speedups_per_network(all_runs) + # Calculate nightly comparison (for uninstrumented configs only) + nightly_comparison = self._calculate_nightly_comparison(all_runs, commit) # Build title with PR/run info if provided full_title = title @@ -187,7 +182,7 @@ def generate_multi_network( # Generate HTML html = self._generate_html( - all_runs, speedups, full_title, output_dir, output_dir + all_runs, nightly_comparison, full_title, output_dir, output_dir, commit ) # Write report @@ -195,8 +190,8 @@ def generate_multi_network( index_file.write_text(html) logger.info(f"Generated report: {index_file}") - # Write combined results.json - combined_results = { + # Write combined results.json with nightly comparison + combined_results: dict[str, Any] = { "results": [ { "network": run.network, @@ -208,11 +203,20 @@ def generate_multi_network( } for run in 
all_runs ], - "speedups": speedups, } + if nightly_comparison: + combined_results["nightly_comparison"] = nightly_comparison + results_file = output_dir / "results.json" results_file.write_text(json.dumps(combined_results, indent=2)) + # Return speedups derived from nightly comparison for backwards compatibility + speedups = { + config: data["speedup_percent"] + for config, data in nightly_comparison.items() + if data.get("speedup_percent") is not None + } + return ReportResult( output_dir=output_dir, index_file=index_file, @@ -360,39 +364,83 @@ def _calculate_speedups(self, runs: list[BenchmarkRun]) -> dict[str, float]: return speedups - def _calculate_speedups_per_network( - self, runs: list[BenchmarkRun] - ) -> dict[str, float]: - """Calculate speedup percentages per network. + def _calculate_nightly_comparison( + self, runs: list[BenchmarkRun], commit: str | None = None + ) -> dict[str, dict[str, Any]]: + """Calculate comparison against nightly baseline. + + Compares PR results against the most recent nightly results for each config. + Only considers uninstrumented configs (those without '-true' suffix). - For each network, uses 'base' as baseline and calculates speedup for 'head'. - Returns a dict mapping network name to speedup percentage. 
+ Args: + runs: List of benchmark runs + commit: PR commit hash + + Returns: + Dict mapping config to comparison data: + { + "450": { + "pr_mean": 14500.0, + "pr_stddev": 100.0, + "nightly_mean": 14800.0, + "nightly_date": "2026-01-05", + "nightly_commit": "abc123...", + "speedup_percent": 2.0 + } + } """ - speedups = {} + comparison: dict[str, dict[str, Any]] = {} - # Group runs by network - networks: dict[str, list[BenchmarkRun]] = {} - for run in runs: - if run.network not in networks: - networks[run.network] = [] - networks[run.network].append(run) + if not self.nightly_history: + logger.warning("No nightly history available for comparison") + return comparison - # Calculate speedup for each network - for network, network_runs in networks.items(): - base_mean = None - head_mean = None + # Group runs by network/config, only uninstrumented (no '-true' suffix) + for run in runs: + network = run.network - for run in network_runs: - if run.command == "base": - base_mean = run.mean - elif run.command == "head": - head_mean = run.mean + # Skip instrumented configs + if network.endswith("-true"): + continue - if base_mean and head_mean and base_mean > 0: - speedup = ((base_mean - head_mean) / base_mean) * 100 - speedups[network] = round(speedup, 1) + # Extract base config name (e.g., "450-false" -> "450") + config = network.replace("-false", "") + + # Get PR result mean + pr_mean = run.mean + pr_stddev = run.stddev + + # Get latest nightly for this config + nightly = self.nightly_history.get_latest(config) + + if nightly: + speedup = None + if nightly.mean > 0: + speedup = round(((nightly.mean - pr_mean) / nightly.mean) * 100, 1) + + comparison[config] = { + "pr_mean": pr_mean, + "pr_stddev": pr_stddev, + "pr_commit": commit, + "nightly_mean": nightly.mean, + "nightly_stddev": nightly.stddev, + "nightly_date": nightly.date, + "nightly_commit": nightly.commit, + "speedup_percent": speedup, + } + else: + # No nightly data, just record PR result + comparison[config] = { + 
"pr_mean": pr_mean, + "pr_stddev": pr_stddev, + "pr_commit": commit, + "nightly_mean": None, + "nightly_date": None, + "nightly_commit": None, + "speedup_percent": None, + } - return speedups + return comparison def _copy_network_artifacts( self, network: str, input_dir: Path, output_dir: Path @@ -416,30 +464,24 @@ def _copy_network_artifacts( def _generate_html( self, runs: list[BenchmarkRun], - speedups: dict[str, float], + nightly_comparison: dict[str, dict[str, Any]], title: str, input_dir: Path, output_dir: Path, + commit: str | None = None, ) -> str: """Generate the HTML report.""" - # Sort runs by network then by command (base first) - sorted_runs = sorted( - runs, - key=lambda r: (r.network, 0 if "base" in r.command.lower() else 1), - ) + # Sort runs by network + sorted_runs = sorted(runs, key=lambda r: r.network) # Generate run data rows run_data_rows = "" for run in sorted_runs: - # Create commit link if there's a commit hash in the command - command_html = self._linkify_commit(run.command) - stddev_str = f"{run.stddev:.3f}" if run.stddev else "N/A" run_data_rows += f""" {run.network} - {command_html} {run.mean:.3f} {stddev_str} {run.user:.3f} @@ -447,26 +489,10 @@ def _generate_html( """ - # Generate speedup rows - speedup_rows = "" - for name, speedup in speedups.items(): - # Skip instrumented runs in speedup summary - if name.lower().endswith("-instrumented"): - continue - - color_class = "" - if speedup > 0: - color_class = "text-green-600" - elif speedup < 0: - color_class = "text-red-600" - - sign = "+" if speedup > 0 else "" - speedup_rows += f""" - - {name} - {sign}{speedup}% - - """ + # Generate nightly comparison section + nightly_section = self._generate_nightly_section( + nightly_comparison, commit + ) # Generate graphs section graphs_section = self._generate_graphs_section(runs, input_dir, output_dir) @@ -474,10 +500,117 @@ def _generate_html( return RUN_REPORT_TEMPLATE.format( title=title, run_data_rows=run_data_rows, - 
speedup_rows=speedup_rows, + nightly_section=nightly_section, graphs_section=graphs_section, ) + def _generate_nightly_section( + self, + nightly_comparison: dict[str, dict[str, Any]], + commit: str | None = None, + ) -> str: + """Generate the nightly comparison section with table and chart.""" + if not nightly_comparison: + return """ +
    +

    No nightly baseline data available for comparison.

    +
    + """ + + # Build comparison table + comparison_rows = "" + has_nightly_data = False + pr_chart_data = [] + + for config, data in sorted(nightly_comparison.items()): + pr_mean = data["pr_mean"] + pr_stddev = data.get("pr_stddev") + nightly_mean = data.get("nightly_mean") + nightly_date = data.get("nightly_date") + nightly_commit = data.get("nightly_commit") + speedup = data.get("speedup_percent") + + # Format PR time + pr_minutes = pr_mean / 60 + pr_time_str = f"{pr_minutes:.1f} min" + + # Format nightly time + if nightly_mean: + has_nightly_data = True + nightly_minutes = nightly_mean / 60 + nightly_time_str = f"{nightly_minutes:.1f} min" + nightly_info = f"{nightly_time_str} ({nightly_date})" + + # Format speedup + if speedup is not None: + color_class = "" + if speedup > 0: + color_class = "text-green-600" + elif speedup < 0: + color_class = "text-red-600" + sign = "+" if speedup > 0 else "" + speedup_str = f'{sign}{speedup}%' + else: + speedup_str = "N/A" + else: + nightly_info = "No baseline" + speedup_str = "N/A" + + # Config display name + config_name = "450 MB" if config == "450" else "32 GB" + + comparison_rows += f""" + + {config_name} + {pr_time_str} + {nightly_info} + {speedup_str} + + """ + + # Collect data for chart + if nightly_mean: + pr_chart_data.append({ + "config": config, + "mean": pr_mean, + "stddev": pr_stddev or 0, + "commit": commit or "unknown", + "date": date.today().isoformat(), + }) + + # Build comparison table HTML + table_html = f""" +

    Comparison to Nightly Master

    +
    + + + + + + + + + + + {comparison_rows} + +
    ConfigPR TimeNightly Time (Date)Change
    +
    + """ + + # Add chart if we have nightly data + chart_html = "" + if has_nightly_data and self.nightly_history and pr_chart_data: + from bench.nightly import generate_pr_chart_snippet + chart_html = f""" +

    Performance Trend

    +
    + {generate_pr_chart_snippet(self.nightly_history, pr_chart_data)} +
    + """ + + return table_html + chart_html + def _linkify_commit(self, command: str) -> str: """Convert commit hashes in command to links.""" @@ -603,9 +736,15 @@ class ReportPhase: """Generate reports from benchmark results.""" def __init__( - self, repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin" + self, + repo_url: str = "https://github.com/bitcoin-dev-tools/benchcoin", + nightly_history_file: Path | None = None, ): - self.generator = ReportGenerator(repo_url) + nightly_history: NightlyHistory | None = None + if nightly_history_file and nightly_history_file.exists(): + from bench.nightly import NightlyHistory + nightly_history = NightlyHistory(nightly_history_file) + self.generator = ReportGenerator(repo_url, nightly_history) def run( self, @@ -632,6 +771,7 @@ def run_multi_network( title: str = "Benchmark Results", pr_number: str | None = None, run_id: str | None = None, + commit: str | None = None, ) -> ReportResult: """Generate report from multiple network benchmark results. 
@@ -641,12 +781,13 @@ def run_multi_network( title: Title for the report pr_number: PR number (for CI reports) run_id: Run ID (for CI reports) + commit: Commit hash for PR Returns: ReportResult with paths and speedup data """ return self.generator.generate_multi_network( - network_dirs, output_dir, title, pr_number, run_id + network_dirs, output_dir, title, pr_number, run_id, commit ) def update_index(self, results_dir: Path, output_file: Path) -> None: From 737ba9870a2a6879ed2cabc9075a1f9c54e80d47 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 7 Jan 2026 10:00:20 +0000 Subject: [PATCH 03/46] only run single bins in prs --- bench.py | 166 +++++++++++------------------------------ bench/benchmark.py | 122 +++++++++++++----------------- bench/build.py | 96 ++++++++---------------- bench/compare.py | 180 --------------------------------------------- bench/report.py | 70 ++++++------------ justfile | 94 +++++++++++------------ 6 files changed, 193 insertions(+), 535 deletions(-) delete mode 100644 bench/compare.py diff --git a/bench.py b/bench.py index b64b89cc316b..35c142006005 100755 --- a/bench.py +++ b/bench.py @@ -2,32 +2,28 @@ """Benchcoin - Bitcoin Core benchmarking toolkit. A CLI for building, benchmarking, analyzing, and reporting on Bitcoin Core -performance. +performance. PR results are compared against nightly baseline data. Usage: - bench.py build COMMIT[:NAME]... Build bitcoind at one or more commits - bench.py run NAME:BINARY... Benchmark one or more binaries - bench.py analyze COMMIT LOGFILE Generate plots from debug.log - bench.py compare RESULTS... Compare benchmark results - bench.py report INPUT OUTPUT Generate HTML report - bench.py nightly append ... Append result to nightly history - bench.py nightly chart ... 
Generate nightly chart HTML + bench.py build COMMIT Build bitcoind at a commit + bench.py run NAME:BINARY Benchmark a binary + bench.py analyze COMMIT LOGFILE Generate plots from debug.log + bench.py report OUTPUT Generate HTML report with nightly comparison + bench.py nightly append ... Append result to nightly history + bench.py nightly chart ... Generate nightly chart HTML Examples: - # Build two commits - bench.py build HEAD~1:before HEAD:after + # Build at HEAD + bench.py build HEAD:pr - # Benchmark built binaries - bench.py run before:./binaries/before/bitcoind after:./binaries/after/bitcoind --datadir /data + # Benchmark built binary + bench.py run pr:./binaries/pr/bitcoind --datadir /data - # Compare results - bench.py compare ./bench-output/results.json - - # Generate HTML report - bench.py report ./bench-output ./report + # Generate HTML report with nightly comparison + bench.py report --network 450-false:./results --nightly-history ./nightly-history.json ./output # Append nightly result and regenerate chart - bench.py nightly append results.json abc123 default 450 + bench.py nightly append results.json abc123 450 450 bench.py nightly chart ./index.html """ @@ -49,7 +45,7 @@ def cmd_build(args: argparse.Namespace) -> int: - """Build bitcoind at one or more commits.""" + """Build bitcoind at a commit.""" from bench.build import BuildPhase capabilities = detect_capabilities() @@ -71,12 +67,10 @@ def cmd_build(args: argparse.Namespace) -> int: try: result = phase.run( - args.commits, + args.commit, output_dir=Path(args.output_dir) if args.output_dir else None, ) - logger.info(f"Built {len(result.binaries)} binary(ies):") - for binary in result.binaries: - logger.info(f" {binary.name}: {binary.path}") + logger.info(f"Built binary: {result.binary.name} at {result.binary.path}") return 0 except Exception as e: logger.error(f"Build failed: {e}") @@ -84,7 +78,7 @@ def cmd_build(args: argparse.Namespace) -> int: def cmd_run(args: argparse.Namespace) -> int: - 
"""Run benchmark on one or more binaries.""" + """Run benchmark on a binary.""" from bench.benchmark import BenchmarkPhase, parse_binary_spec from bench.benchmark_config import BenchmarkConfig @@ -143,46 +137,43 @@ def cmd_run(args: argparse.Namespace) -> int: logger.error(error) return 1 - # Parse binary specs + # Parse binary spec try: - binaries = [parse_binary_spec(spec) for spec in args.binaries] + binary = parse_binary_spec(args.binary) except ValueError as e: logger.error(str(e)) return 1 - # Validate binaries exist - for name, path in binaries: - if not path.exists(): - logger.error(f"Binary not found: {path} ({name})") - return 1 + # Validate binary exists + name, path = binary + if not path.exists(): + logger.error(f"Binary not found: {path} ({name})") + return 1 phase = BenchmarkPhase(config, capabilities, benchmark_config) output_dir = Path(config.output_dir) try: result = phase.run( - binaries=binaries, + binary=binary, datadir=Path(config.datadir) if config.datadir else None, output_dir=output_dir, ) logger.info(f"Results saved to: {result.results_file}") # For instrumented runs, also generate plots - if config.instrumented: + if config.instrumented and result.debug_log: from bench.analyze import AnalyzePhase analyze_phase = AnalyzePhase() - - for binary_result in result.binaries: - if binary_result.debug_log: - try: - analyze_phase.run( - commit=binary_result.name, - log_file=binary_result.debug_log, - output_dir=output_dir / "plots", - ) - except Exception as e: - logger.warning(f"Analysis for {binary_result.name} failed: {e}") + try: + analyze_phase.run( + commit=result.name, + log_file=result.debug_log, + output_dir=output_dir / "plots", + ) + except Exception as e: + logger.warning(f"Analysis failed: {e}") return 0 except Exception as e: @@ -194,46 +185,6 @@ def cmd_run(args: argparse.Namespace) -> int: return 1 -def cmd_compare(args: argparse.Namespace) -> int: - """Compare benchmark results from multiple files.""" - from bench.compare import 
ComparePhase - - if args.verbose: - logging.getLogger().setLevel(logging.DEBUG) - - results_files = [Path(f) for f in args.results_files] - - # Validate files exist - for f in results_files: - if not f.exists(): - logger.error(f"Results file not found: {f}") - return 1 - - phase = ComparePhase() - - try: - result = phase.run(results_files, baseline=args.baseline) - - # Output results - output_json = phase.to_json(result) - - if args.output: - output_path = Path(args.output) - output_path.write_text(output_json) - logger.info(f"Comparison saved to: {output_path}") - else: - print(output_json) - - return 0 - except Exception as e: - logger.error(f"Comparison failed: {e}") - if args.verbose: - import traceback - - traceback.print_exc() - return 1 - - def cmd_analyze(args: argparse.Namespace) -> int: """Generate plots from debug.log.""" from bench.analyze import AnalyzePhase @@ -420,15 +371,14 @@ def main() -> int: # Build command build_parser = subparsers.add_parser( "build", - help="Build bitcoind at one or more commits", - description="Build bitcoind binaries from git commits. " - "Each commit can optionally have a name suffix: COMMIT:NAME", + help="Build bitcoind at a commit", + description="Build bitcoind binary from a git commit. " + "Optionally provide a name suffix: COMMIT:NAME", ) build_parser.add_argument( - "commits", - nargs="+", + "commit", metavar="COMMIT[:NAME]", - help="Commit(s) to build. Format: COMMIT or COMMIT:NAME (e.g., HEAD:latest, abc123:v27)", + help="Commit to build. Format: COMMIT or COMMIT:NAME (e.g., HEAD:pr, abc123:test)", ) build_parser.add_argument( "-o", @@ -446,15 +396,13 @@ def main() -> int: # Run command run_parser = subparsers.add_parser( "run", - help="Run benchmark on one or more binaries", - description="Benchmark bitcoind binaries using hyperfine. 
" - "Each binary must have a name and path: NAME:PATH", + help="Run benchmark on a binary", + description="Benchmark a bitcoind binary using hyperfine.", ) run_parser.add_argument( - "binaries", - nargs="+", + "binary", metavar="NAME:PATH", - help="Binary(ies) to benchmark. Format: NAME:PATH (e.g., v27:./binaries/v27/bitcoind)", + help="Binary to benchmark. Format: NAME:PATH (e.g., pr:./binaries/pr/bitcoind)", ) run_parser.add_argument( "--datadir", @@ -505,32 +453,6 @@ def main() -> int: ) analyze_parser.set_defaults(func=cmd_analyze) - # Compare command - compare_parser = subparsers.add_parser( - "compare", - help="Compare benchmark results from multiple files", - description="Load and compare results from one or more results.json files. " - "Calculates speedup percentages relative to a baseline.", - ) - compare_parser.add_argument( - "results_files", - nargs="+", - metavar="RESULTS_FILE", - help="results.json file(s) to compare", - ) - compare_parser.add_argument( - "--baseline", - metavar="NAME", - help="Name of the baseline entry (default: first entry)", - ) - compare_parser.add_argument( - "-o", - "--output", - metavar="FILE", - help="Output file for comparison JSON (default: stdout)", - ) - compare_parser.set_defaults(func=cmd_compare) - # Report command report_parser = subparsers.add_parser( "report", diff --git a/bench/benchmark.py b/bench/benchmark.py index 60dddb5db725..1989c1ddcc79 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -1,4 +1,4 @@ -"""Benchmark phase - run hyperfine benchmarks on bitcoind binaries.""" +"""Benchmark phase - run hyperfine benchmark on a bitcoind binary.""" from __future__ import annotations @@ -7,7 +7,7 @@ import shutil import subprocess import tempfile -from dataclasses import dataclass, field +from dataclasses import dataclass from pathlib import Path from typing import TYPE_CHECKING @@ -21,18 +21,6 @@ logger = logging.getLogger(__name__) -# Debug flags for instrumented mode -INSTRUMENTED_DEBUG_FLAGS = ["coindb", 
"leveldb", "bench", "validation"] - - -@dataclass -class BinaryResult: - """Result for a single binary.""" - - name: str - flamegraph: Path | None = None - debug_log: Path | None = None - @dataclass class BenchmarkResult: @@ -40,7 +28,9 @@ class BenchmarkResult: results_file: Path instrumented: bool - binaries: list[BinaryResult] = field(default_factory=list) + name: str + flamegraph: Path | None = None + debug_log: Path | None = None def parse_binary_spec(spec: str) -> tuple[str, Path]: @@ -57,7 +47,7 @@ def parse_binary_spec(spec: str) -> tuple[str, Path]: class BenchmarkPhase: - """Run hyperfine benchmarks on bitcoind binaries.""" + """Run hyperfine benchmark on a bitcoind binary.""" def __init__( self, @@ -72,32 +62,31 @@ def __init__( def run( self, - binaries: list[tuple[str, Path]], + binary: tuple[str, Path], datadir: Path | None, output_dir: Path, ) -> BenchmarkResult: - """Run benchmarks on given binaries. + """Run benchmark on given binary. Args: - binaries: List of (name, binary_path) tuples + binary: Tuple of (name, binary_path) datadir: Source datadir with blockchain snapshot (None for fresh sync) output_dir: Where to store results Returns: BenchmarkResult with paths to outputs """ - if not binaries: - raise ValueError("At least one binary is required") + name, binary_path = binary - # Validate all binaries exist - for name, path in binaries: - if not path.exists(): - raise FileNotFoundError(f"Binary not found: {path} ({name})") + # Validate binary exists + if not binary_path.exists(): + raise FileNotFoundError(f"Binary not found: {binary_path} ({name})") - # Ensure binaries can run on this system (patches guix binaries on NixOS) - for name, path in binaries: - if not ensure_binary_runnable(path): - raise RuntimeError(f"Binary {name} at {path} cannot be made runnable") + # Ensure binary can run on this system (patches guix binaries on NixOS) + if not ensure_binary_runnable(binary_path): + raise RuntimeError( + f"Binary {name} at {binary_path} cannot 
be made runnable" + ) # Check prerequisites errors = self.capabilities.check_for_run(self.config.instrumented) @@ -122,9 +111,7 @@ def run( logger.info(f" Source datadir: {datadir}") else: logger.info(" Mode: Fresh sync (no source datadir)") - logger.info(f" Binaries: {len(binaries)}") - for name, path in binaries: - logger.info(f" {name}: {path}") + logger.info(f" Binary: {name} at {binary_path}") logger.info(f" Instrumented: {self.config.instrumented}") logger.info(f" Runs: {self.config.runs}") logger.info(f" dbcache: {self.config.dbcache}") @@ -139,7 +126,8 @@ def run( # Build hyperfine command cmd = self._build_hyperfine_cmd( - binaries=binaries, + name=name, + binary_path=binary_path, tmp_datadir=tmp_datadir, results_file=results_file, setup_script=setup_script, @@ -148,55 +136,49 @@ def run( output_dir=output_dir, ) - # Log the commands being benchmarked - logger.info("Commands to benchmark:") - for name, path in binaries: - bitcoind_cmd = self._build_bitcoind_cmd(path, tmp_datadir) - logger.info(f" {name}: {bitcoind_cmd}") + # Log the command being benchmarked + bitcoind_cmd = self._build_bitcoind_cmd(binary_path, tmp_datadir) + logger.info(f"Command to benchmark: {bitcoind_cmd}") if self.config.dry_run: logger.info(f"[DRY RUN] Would run: {' '.join(cmd)}") return BenchmarkResult( results_file=results_file, instrumented=self.config.instrumented, + name=name, ) # Log the full hyperfine command logger.info("Running hyperfine...") - logger.info(f" Command: {' '.join(cmd[:7])} ...") # First few args logger.debug(f" Full command: {' '.join(cmd)}") subprocess.run(cmd, check=True) - # Collect results - benchmark_result = BenchmarkResult( + # Collect result + result = BenchmarkResult( results_file=results_file, instrumented=self.config.instrumented, + name=name, ) - # For instrumented runs, collect flamegraphs and debug logs + # For instrumented runs, collect flamegraph and debug log if self.config.instrumented: logger.info("Collecting instrumented artifacts...") - 
for name, _path in binaries: - binary_result = BinaryResult(name=name) - - flamegraph_file = output_dir / f"{name}-flamegraph.svg" - debug_log_file = output_dir / f"{name}-debug.log" - - if flamegraph_file.exists(): - binary_result.flamegraph = flamegraph_file - logger.info(f" Flamegraph ({name}): {flamegraph_file}") - if debug_log_file.exists(): - binary_result.debug_log = debug_log_file - logger.info(f" Debug log ({name}): {debug_log_file}") + flamegraph_file = output_dir / f"{name}-flamegraph.svg" + debug_log_file = output_dir / f"{name}-debug.log" - benchmark_result.binaries.append(binary_result) + if flamegraph_file.exists(): + result.flamegraph = flamegraph_file + logger.info(f" Flamegraph: {flamegraph_file}") + if debug_log_file.exists(): + result.debug_log = debug_log_file + logger.info(f" Debug log: {debug_log_file}") # Clean up tmp_datadir if tmp_datadir.exists(): logger.debug(f"Cleaning up tmp_datadir: {tmp_datadir}") shutil.rmtree(tmp_datadir) - return benchmark_result + return result finally: # Clean up temp scripts @@ -254,7 +236,7 @@ def _create_prepare_script( return self._create_temp_script(commands, "prepare") def _create_cleanup_script(self, tmp_datadir: Path) -> Path: - """Create cleanup script (runs after all timing runs for each command).""" + """Create cleanup script (runs after all timing runs).""" commands = [ f'rm -rf "{tmp_datadir}"/*', ] @@ -301,7 +283,8 @@ def _build_bitcoind_cmd( def _build_hyperfine_cmd( self, - binaries: list[tuple[str, Path]], + name: str, + binary_path: Path, tmp_datadir: Path, results_file: Path, setup_script: Path, @@ -319,22 +302,18 @@ def _build_hyperfine_cmd( f"--runs={self.config.runs}", f"--export-json={results_file}", "--show-output", + f"--command-name={name}", ] - # Add command names and build commands - for name, binary_path in binaries: - cmd.append(f"--command-name={name}") - - # Build the actual commands to benchmark - for name, binary_path in binaries: - bitcoind_cmd = 
self._build_bitcoind_cmd(binary_path, tmp_datadir) + # Build the actual command to benchmark + bitcoind_cmd = self._build_bitcoind_cmd(binary_path, tmp_datadir) - # For instrumented runs, append the conclude logic to each command - if self.config.instrumented: - conclude = self._create_conclude_commands(name, tmp_datadir, output_dir) - bitcoind_cmd += f" && {conclude}" + # For instrumented runs, append the conclude logic + if self.config.instrumented: + conclude = self._create_conclude_commands(name, tmp_datadir, output_dir) + bitcoind_cmd += f" && {conclude}" - cmd.append(bitcoind_cmd) + cmd.append(bitcoind_cmd) return cmd @@ -344,8 +323,7 @@ def _create_conclude_commands( tmp_datadir: Path, output_dir: Path, ) -> str: - """Create inline conclude commands for a specific binary.""" - # Return shell commands to run after each benchmark + """Create inline conclude commands for the binary.""" commands = [] # Move flamegraph if exists diff --git a/bench/build.py b/bench/build.py index 6187263a73de..7e51e2140030 100644 --- a/bench/build.py +++ b/bench/build.py @@ -1,4 +1,4 @@ -"""Build phase - compile bitcoind at specified commits.""" +"""Build phase - compile bitcoind at a specified commit.""" from __future__ import annotations @@ -20,7 +20,7 @@ @dataclass class BuiltBinary: - """A single built binary.""" + """A built binary.""" name: str path: Path @@ -31,7 +31,7 @@ class BuiltBinary: class BuildResult: """Result of the build phase.""" - binaries: list[BuiltBinary] + binary: BuiltBinary def parse_commit_spec(spec: str) -> tuple[str, str | None]: @@ -46,7 +46,7 @@ def parse_commit_spec(spec: str) -> tuple[str, str | None]: class BuildPhase: - """Build bitcoind binaries at specified commits.""" + """Build bitcoind binary at a specified commit.""" def __init__( self, @@ -60,17 +60,17 @@ def __init__( def run( self, - commit_specs: list[str], + commit_spec: str, output_dir: Path | None = None, ) -> BuildResult: - """Build bitcoind at given commits. 
+ """Build bitcoind at given commit. Args: - commit_specs: List of commit specs like 'abc123:name' or 'abc123' - output_dir: Where to store binaries (default: ./binaries) + commit_spec: Commit spec like 'abc123:name' or 'abc123' + output_dir: Where to store binary (default: ./binaries) Returns: - BuildResult with list of built binaries + BuildResult with the built binary """ # Check prerequisites errors = self.capabilities.check_for_build() @@ -79,78 +79,46 @@ def run( output_dir = output_dir or Path(self.config.binaries_dir) - # Parse commit specs and resolve to full hashes - commits: list[tuple[str, str, str]] = [] # (commit_hash, name, original_spec) - for spec in commit_specs: - commit, name = parse_commit_spec(spec) - commit_hash = git_rev_parse(commit, self.repo_path) - # Default name to short hash if not provided - if name is None: - name = commit_hash[:12] - commits.append((commit_hash, name, spec)) - - logger.info(f"Building {len(commits)} binary(ies):") - for commit_hash, name, spec in commits: - logger.info(f" {name}: {commit_hash[:12]} ({spec})") + # Parse commit spec and resolve to full hash + commit, name = parse_commit_spec(commit_spec) + commit_hash = git_rev_parse(commit, self.repo_path) + + # Default name to short hash if not provided + if name is None: + name = commit_hash[:12] + + logger.info(f"Building binary: {name} ({commit_hash[:12]})") logger.info(f" Repo: {self.repo_path}") logger.info(f" Output: {output_dir}") - # Check if we can skip existing builds - binaries_to_build: list[ - tuple[str, str, Path] - ] = [] # (commit_hash, name, output_path) - for commit_hash, name, _spec in commits: - binary_dir = output_dir / name - binary_dir.mkdir(parents=True, exist_ok=True) - binary_path = binary_dir / "bitcoind" - - if self.config.skip_existing and binary_path.exists(): - logger.info(f" Skipping {name} - binary exists") - else: - binaries_to_build.append((commit_hash, name, binary_path)) - - if not binaries_to_build: - logger.info("All binaries 
exist and --skip-existing set, skipping build") + # Setup output path + binary_dir = output_dir / name + binary_dir.mkdir(parents=True, exist_ok=True) + binary_path = binary_dir / "bitcoind" + + # Check if we can skip existing build + if self.config.skip_existing and binary_path.exists(): + logger.info(f" Skipping {name} - binary exists") return BuildResult( - binaries=[ - BuiltBinary( - name=name, - path=output_dir / name / "bitcoind", - commit=commit_hash, - ) - for commit_hash, name, _spec in commits - ] + binary=BuiltBinary(name=name, path=binary_path, commit=commit_hash) ) # Save git state for restoration git_state = GitState(self.repo_path) git_state.save() - built_binaries: list[BuiltBinary] = [] - try: - for commit_hash, name, output_path in binaries_to_build: - self._build_commit(name, commit_hash, output_path) - built_binaries.append( - BuiltBinary(name=name, path=output_path, commit=commit_hash) - ) - + self._build_commit(name, commit_hash, binary_path) finally: # Always restore git state git_state.restore() - # Include skipped binaries in result - all_binaries = [] - for commit_hash, name, _spec in commits: - binary_path = output_dir / name / "bitcoind" - all_binaries.append( - BuiltBinary(name=name, path=binary_path, commit=commit_hash) - ) - - return BuildResult(binaries=all_binaries) + return BuildResult( + binary=BuiltBinary(name=name, path=binary_path, commit=commit_hash) + ) def _build_commit(self, name: str, commit: str, output_path: Path) -> None: - """Build bitcoind for a single commit.""" + """Build bitcoind for a commit.""" logger.info(f"Building {name} ({commit[:12]})") if self.config.dry_run: diff --git a/bench/compare.py b/bench/compare.py deleted file mode 100644 index fac328841634..000000000000 --- a/bench/compare.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Compare phase - compare benchmark results from multiple runs.""" - -from __future__ import annotations - -import json -import logging -from dataclasses import dataclass -from pathlib 
import Path - -logger = logging.getLogger(__name__) - - -@dataclass -class BenchmarkEntry: - """A single benchmark entry from results.json.""" - - command: str - mean: float - stddev: float | None - user: float - system: float - min: float - max: float - times: list[float] - - -@dataclass -class Comparison: - """Comparison of one entry against the baseline.""" - - name: str - mean: float - baseline_mean: float - speedup_percent: float - stddev: float | None - - -@dataclass -class CompareResult: - """Result of comparison.""" - - baseline: str - comparisons: list[Comparison] - - -class ComparePhase: - """Compare benchmark results from multiple results.json files.""" - - def run( - self, - results_files: list[Path], - baseline: str | None = None, - ) -> CompareResult: - """Compare benchmark results. - - Args: - results_files: List of results.json files to compare - baseline: Name of the baseline entry (default: first entry) - - Returns: - CompareResult with comparison data - """ - if not results_files: - raise ValueError("At least one results file is required") - - # Load all entries from all files - all_entries: list[BenchmarkEntry] = [] - for results_file in results_files: - if not results_file.exists(): - raise FileNotFoundError(f"Results file not found: {results_file}") - - logger.info(f"Loading results from: {results_file}") - with open(results_file) as f: - data = json.load(f) - - entries = self._parse_results(data) - logger.info(f" Found {len(entries)} entries") - all_entries.extend(entries) - - if not all_entries: - raise ValueError("No benchmark entries found in results files") - - # Determine baseline - if baseline is None: - baseline = all_entries[0].command - logger.info(f"Using baseline: {baseline}") - - # Find baseline entry - baseline_entry = None - for entry in all_entries: - if entry.command == baseline: - baseline_entry = entry - break - - if baseline_entry is None: - available = [e.command for e in all_entries] - raise ValueError( - f"Baseline 
'{baseline}' not found. Available: {', '.join(available)}" - ) - - # Calculate comparisons - comparisons: list[Comparison] = [] - for entry in all_entries: - if entry.command == baseline: - continue - - speedup = self._calculate_speedup(baseline_entry.mean, entry.mean) - comparisons.append( - Comparison( - name=entry.command, - mean=entry.mean, - baseline_mean=baseline_entry.mean, - speedup_percent=speedup, - stddev=entry.stddev, - ) - ) - - # Log results - logger.info("Comparison results:") - logger.info(f" Baseline ({baseline}): {baseline_entry.mean:.3f}s") - for comp in comparisons: - sign = "+" if comp.speedup_percent > 0 else "" - logger.info( - f" {comp.name}: {comp.mean:.3f}s ({sign}{comp.speedup_percent:.1f}%)" - ) - - return CompareResult( - baseline=baseline, - comparisons=comparisons, - ) - - def _parse_results(self, data: dict) -> list[BenchmarkEntry]: - """Parse results from hyperfine JSON output.""" - entries = [] - - results = data.get("results", []) - for result in results: - entries.append( - BenchmarkEntry( - command=result.get("command", "unknown"), - mean=result.get("mean", 0), - stddev=result.get("stddev"), - user=result.get("user", 0), - system=result.get("system", 0), - min=result.get("min", 0), - max=result.get("max", 0), - times=result.get("times", []), - ) - ) - - return entries - - def _calculate_speedup(self, baseline_mean: float, other_mean: float) -> float: - """Calculate speedup percentage. 
- - Positive = faster than baseline - Negative = slower than baseline - """ - if baseline_mean == 0: - return 0.0 - return round(((baseline_mean - other_mean) / baseline_mean) * 100, 1) - - def to_json(self, result: CompareResult) -> str: - """Convert comparison result to JSON.""" - return json.dumps( - { - "baseline": result.baseline, - "comparisons": [ - { - "name": c.name, - "mean": c.mean, - "baseline_mean": c.baseline_mean, - "speedup_percent": c.speedup_percent, - "stddev": c.stddev, - } - for c in result.comparisons - ], - }, - indent=2, - ) diff --git a/bench/report.py b/bench/report.py index 785a77f40fa4..8ee068b79a44 100644 --- a/bench/report.py +++ b/bench/report.py @@ -229,7 +229,7 @@ def generate( output_dir: Path, title: str = "Benchmark Results", ) -> ReportResult: - """Generate HTML report from benchmark artifacts. + """Generate HTML report from benchmark artifacts (single binary mode). Args: input_dir: Directory containing results.json and artifacts @@ -237,7 +237,7 @@ def generate( title: Title for the report Returns: - ReportResult with paths and speedup data + ReportResult with paths """ output_dir.mkdir(parents=True, exist_ok=True) @@ -252,11 +252,8 @@ def generate( # Parse results runs = self._parse_results(data) - # Calculate speedups - speedups = self._calculate_speedups(runs) - - # Generate HTML - html = self._generate_html(runs, speedups, title, input_dir, output_dir) + # Generate HTML (no nightly comparison in single-directory mode) + html = self._generate_html(runs, {}, title, input_dir, output_dir) # Write report index_file = output_dir / "index.html" @@ -269,7 +266,7 @@ def generate( return ReportResult( output_dir=output_dir, index_file=index_file, - speedups=speedups, + speedups={}, ) def generate_index( @@ -337,33 +334,6 @@ def _parse_results(self, data: dict) -> list[BenchmarkRun]: return runs - def _calculate_speedups(self, runs: list[BenchmarkRun]) -> dict[str, float]: - """Calculate speedup percentages. 
- - Uses the first entry as baseline and compares all others against it. - Returns a dict mapping command name to speedup percentage. - """ - speedups = {} - - if len(runs) < 2: - return speedups - - # Use first run as baseline - baseline = runs[0] - baseline_mean = baseline.mean - - if baseline_mean <= 0: - return speedups - - # Calculate speedup for each other run - for run in runs[1:]: - speedup = ((baseline_mean - run.mean) / baseline_mean) * 100 - # Use command name as key, extracting just the name part - name = run.command - speedups[name] = round(speedup, 1) - - return speedups - def _calculate_nightly_comparison( self, runs: list[BenchmarkRun], commit: str | None = None ) -> dict[str, dict[str, Any]]: @@ -490,9 +460,7 @@ def _generate_html( """ # Generate nightly comparison section - nightly_section = self._generate_nightly_section( - nightly_comparison, commit - ) + nightly_section = self._generate_nightly_section(nightly_comparison, commit) # Generate graphs section graphs_section = self._generate_graphs_section(runs, input_dir, output_dir) @@ -539,7 +507,13 @@ def _generate_nightly_section( has_nightly_data = True nightly_minutes = nightly_mean / 60 nightly_time_str = f"{nightly_minutes:.1f} min" - nightly_info = f"{nightly_time_str} ({nightly_date})" + # Include commit link if available + if nightly_commit: + short_commit = nightly_commit[:7] + commit_link = f'{short_commit}' + nightly_info = f"{nightly_time_str} ({nightly_date}, {commit_link})" + else: + nightly_info = f"{nightly_time_str} ({nightly_date})" # Format speedup if speedup is not None: @@ -570,13 +544,15 @@ def _generate_nightly_section( # Collect data for chart if nightly_mean: - pr_chart_data.append({ - "config": config, - "mean": pr_mean, - "stddev": pr_stddev or 0, - "commit": commit or "unknown", - "date": date.today().isoformat(), - }) + pr_chart_data.append( + { + "config": config, + "mean": pr_mean, + "stddev": pr_stddev or 0, + "commit": commit or "unknown", + "date": 
date.today().isoformat(), + } + ) # Build comparison table HTML table_html = f""" @@ -602,6 +578,7 @@ def _generate_nightly_section( chart_html = "" if has_nightly_data and self.nightly_history and pr_chart_data: from bench.nightly import generate_pr_chart_snippet + chart_html = f"""

    Performance Trend

    @@ -743,6 +720,7 @@ def __init__( nightly_history: NightlyHistory | None = None if nightly_history_file and nightly_history_file.exists(): from bench.nightly import NightlyHistory + nightly_history = NightlyHistory(nightly_history_file) self.generator = ReportGenerator(repo_url, nightly_history) diff --git a/justfile b/justfile index d128c7e8b195..f38282e5d981 100644 --- a/justfile +++ b/justfile @@ -9,92 +9,84 @@ default: # Test instrumented run using signet (includes report generation) [group('local')] -test-instrumented base head datadir: - nix develop --command python3 bench.py build --skip-existing {{ base }}:base {{ head }}:head +test-instrumented commit datadir: + nix develop --command python3 bench.py build --skip-existing {{ commit }}:pr nix develop --command python3 bench.py --profile quick run \ - --chain signet \ - --instrumented \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-true \ --datadir {{ datadir }} \ - base:./binaries/base/bitcoind \ - head:./binaries/head/bitcoind + pr:./binaries/pr/bitcoind nix develop --command python3 bench.py report bench-output/ bench-output/ # Test uninstrumented run using signet [group('local')] -test-uninstrumented base head datadir: - nix develop --command python3 bench.py build --skip-existing {{ base }}:base {{ head }}:head +test-uninstrumented commit datadir: + nix develop --command python3 bench.py build --skip-existing {{ commit }}:pr nix develop --command python3 bench.py --profile quick run \ - --chain signet \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-false \ --datadir {{ datadir }} \ - base:./binaries/base/bitcoind \ - head:./binaries/head/bitcoind + pr:./binaries/pr/bitcoind # Full benchmark with instrumentation (flamegraphs + plots) [group('local')] -instrumented base head datadir: - python3 bench.py build {{ base }}:base {{ head }}:head - python3 bench.py --profile quick run \ - --instrumented \ +instrumented commit datadir: + python3 bench.py build {{ commit 
}}:pr + python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-true \ --datadir {{ datadir }} \ - base:./binaries/base/bitcoind \ - head:./binaries/head/bitcoind + pr:./binaries/pr/bitcoind -# Just build binaries (useful for incremental testing) +# Just build a binary (useful for incremental testing) [group('local')] -build *commits: - python3 bench.py build {{ commits }} +build commit: + python3 bench.py build {{ commit }} -# Run benchmark with pre-built binaries +# Run benchmark with pre-built binary [group('local')] -run datadir *binaries: - python3 bench.py run --datadir {{ datadir }} {{ binaries }} +run datadir binary: + python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-false \ + --datadir {{ datadir }} \ + {{ binary }} # Generate plots from a debug.log file [group('local')] analyze commit logfile output_dir="./plots": python3 bench.py analyze {{ commit }} {{ logfile }} --output-dir {{ output_dir }} -# Compare benchmark results -[group('local')] -compare *results_files: - python3 bench.py compare {{ results_files }} - # Generate HTML report from benchmark results [group('local')] -report input_dir output_dir: - python3 bench.py report {{ input_dir }} {{ output_dir }} +report input_dir output_dir nightly_history="": + #!/usr/bin/env bash + set -euo pipefail + if [ -n "{{ nightly_history }}" ]; then + python3 bench.py report {{ input_dir }} {{ output_dir }} --nightly-history {{ nightly_history }} + else + python3 bench.py report {{ input_dir }} {{ output_dir }} + fi # ============================================================================ # CI commands (called by GitHub Actions) # ============================================================================ -# Build binaries for CI +# Build binary for CI [group('ci')] -ci-build base_commit head_commit binaries_dir: - python3 bench.py build -o {{ binaries_dir }} {{ base_commit }}:base {{ head_commit }}:head - -# Run uninstrumented 
benchmarks for CI -[group('ci')] -ci-run datadir tmp_datadir output_dir dbcache binaries_dir: - python3 bench.py --profile ci run \ - --datadir {{ datadir }} \ - --tmp-datadir {{ tmp_datadir }} \ - --output-dir {{ output_dir }} \ - --dbcache {{ dbcache }} \ - base:{{ binaries_dir }}/base/bitcoind \ - head:{{ binaries_dir }}/head/bitcoind +ci-build commit binaries_dir: + python3 bench.py build -o {{ binaries_dir }} {{ commit }}:pr -# Run instrumented benchmarks for CI +# Run benchmark for CI [group('ci')] -ci-run-instrumented datadir tmp_datadir output_dir dbcache binaries_dir: - python3 bench.py --profile ci run \ - --instrumented \ +ci-run benchmark_config matrix_entry datadir tmp_datadir output_dir binaries_dir: + python3 bench.py run \ + --benchmark-config {{ benchmark_config }} \ + --matrix-entry {{ matrix_entry }} \ --datadir {{ datadir }} \ --tmp-datadir {{ tmp_datadir }} \ --output-dir {{ output_dir }} \ - --dbcache {{ dbcache }} \ - base:{{ binaries_dir }}/base/bitcoind \ - head:{{ binaries_dir }}/head/bitcoind + pr:{{ binaries_dir }}/pr/bitcoind # ============================================================================ # Git helpers From 28ac510bb57b75b48b066827cdd726fc6c8a47ed Mon Sep 17 00:00:00 2001 From: will Date: Thu, 8 Jan 2026 10:52:52 +0000 Subject: [PATCH 04/46] rebase at 0100 GMT --- .github/workflows/rebase.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index bd721c123302..30c6f045fbc4 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -2,7 +2,7 @@ name: Nightly Rebase on: schedule: - - cron: '0 5 * * *' # 05:00 GMT daily + - cron: '0 1 * * *' # 01:00 GMT daily workflow_dispatch: # manual trigger permissions: From c00c1689963099e98621a235bd1627eb8c750d87 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 8 Jan 2026 10:53:02 +0000 Subject: [PATCH 05/46] make charts taller --- bench/nightly.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/bench/nightly.py b/bench/nightly.py index 15367aed7f6b..671adf2ca248 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -421,7 +421,7 @@ def generate_nightly_chart(history: NightlyHistory, output_file: Path) -> None: # HTML/JS snippet for PR comparison chart (embedded in report) PR_CHART_SNIPPET = """ -
    +
    - - - - -
    -

    Bitcoin Core Nightly IBD Benchmark

    -

    - IBD from a single networked peer -

    -
    -
    -
    -

    - View PR benchmark results -

    -
    - - -""" - - @dataclass class NightlyResult: """A single nightly benchmark result with embedded config and machine info.""" @@ -594,174 +366,16 @@ def generate_nightly_chart(history: NightlyHistory, output_file: Path) -> None: history: NightlyHistory instance with loaded results output_file: Path to write index.html """ - # Convert results to simplified format for JS chart (config as string, not object) - chart_data = json.dumps(history.get_chart_data()) + from bench.render import render_template - html = NIGHTLY_CHART_TEMPLATE.format(chart_data=chart_data) + chart_data = history.get_chart_data() + html = render_template("nightly-chart.html", chart_data=chart_data) output_file.parent.mkdir(parents=True, exist_ok=True) output_file.write_text(html) logger.info(f"Generated nightly chart: {output_file}") -# HTML/JS snippet for PR comparison chart (embedded in report) -PR_CHART_SNIPPET = """ -
    - - -""" - - -def generate_pr_chart_snippet( - history: NightlyHistory, - pr_results: list[dict], -) -> str: - """Generate HTML/JS snippet for PR comparison chart. - - Args: - history: NightlyHistory with nightly results - pr_results: List of PR result dicts with keys: config, mean, stddev, commit, date - - Returns: - HTML string to embed in report - """ - nightly_data = json.dumps(history.get_chart_data()) - pr_data = json.dumps(pr_results) - - return PR_CHART_SNIPPET.format(nightly_data=nightly_data, pr_data=pr_data) - - class NightlyPhase: """CLI interface for nightly benchmark operations.""" diff --git a/bench/render.py b/bench/render.py new file mode 100644 index 000000000000..9f4893644211 --- /dev/null +++ b/bench/render.py @@ -0,0 +1,24 @@ +"""Jinja2 template rendering utilities.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +from jinja2 import Environment, FileSystemLoader, select_autoescape + + +def get_template_env() -> Environment: + """Get the Jinja2 environment for rendering templates.""" + template_dir = Path(__file__).parent / "templates" + return Environment( + loader=FileSystemLoader(template_dir), + autoescape=select_autoescape(["html", "xml"]), + ) + + +def render_template(template_name: str, **context: Any) -> str: + """Render a template with the given context.""" + env = get_template_env() + template = env.get_template(template_name) + return template.render(**context) diff --git a/bench/report.py b/bench/report.py index c50fd41a1e8e..1941dce55af6 100644 --- a/bench/report.py +++ b/bench/report.py @@ -7,7 +7,6 @@ import json import logging -import re import shutil from dataclasses import dataclass, field from datetime import date @@ -16,11 +15,11 @@ from bench.nightly import ( NightlyHistory, - generate_pr_chart_snippet, series_color_index, series_key, series_label, ) +from bench.render import render_template logger = logging.getLogger(__name__) @@ -93,69 +92,6 @@ def 
parse_network_name(network: str) -> tuple[int, str]: return dbcache, instrumentation -# HTML template for individual run report -RUN_REPORT_TEMPLATE = """ - - - Benchmark Results - - - -
    -

    Benchmark Results

    -
    -

    {title}

    - - -

    Run Data

    -
    - - - - - - - - - - - - {run_data_rows} - -
    ConfigMean (s)Std DevUser (s)System (s)
    -
    - - - {nightly_section} - - - {graphs_section} -
    -
    - -""" - -# HTML template for main index -INDEX_TEMPLATE = """ - - - Bitcoin Benchmark Results - - - -
    -

    Bitcoin Benchmark Results

    -
    -

    Available Results

    -
      - {run_list} -
    -
    -
    - -""" - - @dataclass class BenchmarkRun: """Parsed benchmark run data.""" @@ -354,7 +290,7 @@ def generate_index( results_dir: Directory containing pr-* subdirectories output_file: Where to write index.html """ - runs = [] + results = [] if results_dir.exists(): for pr_dir in sorted(results_dir.iterdir()): @@ -365,24 +301,9 @@ def generate_index( if run_dir.is_dir(): pr_runs.append(run_dir.name) if pr_runs: - runs.append((pr_num, pr_runs)) - - run_list_html = "" - for pr_num, pr_runs in runs: - run_links = "\n".join( - f'
  • Run {run}
  • ' - for run in pr_runs - ) - run_list_html += f""" -
  • PR #{pr_num} -
      - {run_links} -
    -
  • - """ - - html = INDEX_TEMPLATE.format(run_list=run_list_html) + results.append((pr_num, pr_runs)) + + html = render_template("results-index.html", results=results) output_file.write_text(html) logger.info(f"Generated index: {output_file}") @@ -517,125 +438,79 @@ def _generate_html( commit: str | None = None, ) -> str: """Generate the HTML report.""" - # Sort runs by network sorted_runs = sorted(runs, key=lambda r: r.network) - # Generate run data rows - run_data_rows = "" + runs_data = [] for run in sorted_runs: - stddev_str = f"{run.stddev:.3f}" if run.stddev else "N/A" - - # Format config name for display dbcache, instrumentation = parse_network_name(run.network) config_display = format_config_display( dbcache, instrumentation=instrumentation ) + runs_data.append( + { + "config_display": config_display, + "mean": run.mean, + "stddev": run.stddev, + "user": run.user, + "system": run.system, + } + ) - run_data_rows += f""" - - {config_display} - {run.mean:.3f} - {stddev_str} - {run.user:.3f} - {run.system:.3f} - - """ + nightly_data, pr_chart_data = self._prepare_nightly_data( + nightly_comparison, commit + ) - # Generate nightly comparison section - nightly_section = self._generate_nightly_section(nightly_comparison, commit) + graphs = self._prepare_graphs_data(runs, input_dir, output_dir) - # Generate graphs section - graphs_section = self._generate_graphs_section(runs, input_dir, output_dir) + nightly_chart_data = None + if pr_chart_data and self.nightly_history: + nightly_chart_data = self.nightly_history.get_chart_data() - return RUN_REPORT_TEMPLATE.format( + return render_template( + "pr-report.html", title=title, - run_data_rows=run_data_rows, - nightly_section=nightly_section, - graphs_section=graphs_section, + runs=runs_data, + nightly_comparison=nightly_data, + pr_chart_data=pr_chart_data, + nightly_chart_data=nightly_chart_data, + graphs=graphs, + repo_url=self.repo_url, ) - def _generate_nightly_section( + def _prepare_nightly_data( self, 
nightly_comparison: dict[str, dict[str, Any]], commit: str | None = None, - ) -> str: - """Generate the nightly comparison section with table and chart.""" + ) -> tuple[dict[str, dict[str, Any]], list[dict]]: + """Prepare nightly comparison data for template rendering. + + Returns: + Tuple of (nightly_comparison_with_display, pr_chart_data) + """ if not nightly_comparison: - return """ -
    -

    No nightly baseline data available for comparison.

    -
    - """ - - # Build comparison table - comparison_rows = "" - has_nightly_data = False + return {}, [] + + result = {} pr_chart_data = [] for config, data in sorted(nightly_comparison.items()): - pr_mean = data["pr_mean"] - pr_stddev = data.get("pr_stddev") - nightly_mean = data.get("nightly_mean") - nightly_date = data.get("nightly_date") - nightly_commit = data.get("nightly_commit") - speedup = data.get("speedup_percent") - - # Format PR time - pr_minutes = pr_mean / 60 - pr_time_str = f"{pr_minutes:.1f} min" - - # Format nightly time - if nightly_mean: - has_nightly_data = True - nightly_minutes = nightly_mean / 60 - nightly_time_str = f"{nightly_minutes:.1f} min" - # Include commit link if available - if nightly_commit: - short_commit = nightly_commit[:7] - commit_link = f'{short_commit}' - nightly_info = f"{nightly_time_str} ({nightly_date}, {commit_link})" - else: - nightly_info = f"{nightly_time_str} ({nightly_date})" - - # Format speedup - if speedup is not None: - color_class = "" - if speedup > 0: - color_class = "text-green-600" - elif speedup < 0: - color_class = "text-red-600" - sign = "+" if speedup > 0 else "" - speedup_str = f'{sign}{speedup}%' - else: - speedup_str = "N/A" - else: - nightly_info = "No baseline" - speedup_str = "N/A" - - # Config display name using helper try: dbcache = int(config) except ValueError: dbcache = 0 - config_name = format_config_display(dbcache) - - comparison_rows += f""" - - {config_name} - {pr_time_str} - {nightly_info} - {speedup_str} - - """ - - # Collect data for chart - if nightly_mean: + + result[config] = { + **data, + "config_display": format_config_display(dbcache), + } + + if data.get("nightly_mean"): key = data.get("series_key", f"unknown|db{config}|0-0") pr_chart_data.append( { "config": config, - "mean": pr_mean, - "stddev": pr_stddev or 0, + "mean": data["pr_mean"], + "stddev": data.get("pr_stddev") or 0, "commit": commit or "unknown", "date": date.today().isoformat(), "series_key": key, @@ -644,82 
+519,32 @@ def _generate_nightly_section( } ) - # Build comparison table HTML - table_html = f""" -

    Comparison to Nightly Master

    -
    - - - - - - - - - - - {comparison_rows} - -
    ConfigPR TimeNightly Time (Date)Change
    -
    - """ - - # Add chart if we have nightly data - chart_html = "" - if has_nightly_data and self.nightly_history and pr_chart_data: - chart_html = f""" -

    Performance Trend

    -
    - {generate_pr_chart_snippet(self.nightly_history, pr_chart_data)} -
    - """ + return result, pr_chart_data - return table_html + chart_html - - def _linkify_commit(self, command: str) -> str: - """Convert commit hashes in command to links.""" - - def replace_commit(match): - commit = match.group(1) - short_commit = commit[:8] if len(commit) > 8 else commit - return f'({short_commit})' - - return re.sub(r"\(([a-f0-9]{7,40})\)", replace_commit, command) - - def _generate_graphs_section( + def _prepare_graphs_data( self, runs: list[BenchmarkRun], input_dir: Path, output_dir: Path, - ) -> str: - """Generate the flamegraphs and plots section.""" - graphs_html = "" + ) -> list[dict]: + """Prepare flamegraphs and plots data for template rendering.""" + graphs = [] for run in runs: - # Use the command/name directly (e.g., "base", "head") name = run.command network = run.network - # Check for flamegraph - try both with and without network prefix - # Network-prefixed: {network}-{name}-flamegraph.svg (for multi-network reports) - # Non-prefixed: {name}-flamegraph.svg (for single-network reports) flamegraph_name = None - flamegraph_path = None - network_prefixed = f"{network}-{name}-flamegraph.svg" non_prefixed = f"{name}-flamegraph.svg" if (output_dir / network_prefixed).exists(): flamegraph_name = network_prefixed - flamegraph_path = output_dir / network_prefixed elif (input_dir / non_prefixed).exists(): flamegraph_name = non_prefixed - flamegraph_path = input_dir / non_prefixed - # Check for plots - try both network-prefixed and non-prefixed directories plot_files = [] plots_dir = None - network_plots_dir = output_dir / f"{network}-plots" regular_plots_dir = input_dir / "plots" @@ -738,41 +563,24 @@ def _generate_graphs_section( if p.name.startswith(f"{name}-") and p.suffix == ".png" ] - if not flamegraph_path and not plot_files: + if not flamegraph_name and not plot_files: continue - # Build display label display_label = f"{network} - {name}" if network != "default" else name + plots_rel_path = plots_dir.name if plots_dir else "" - 
graphs_html += f""" -
    -

    {display_label}

    - """ - - if flamegraph_path: - graphs_html += f""" - - """ - - if plot_files and plots_dir: - # Determine the relative path for plots - plots_rel_path = plots_dir.name - for plot in sorted(plot_files): - graphs_html += f""" - - {plot} - - """ - - graphs_html += "
    " - - if graphs_html: - return f""" -

    Flamegraphs and Plots

    - {graphs_html} - """ + graphs.append( + { + "label": display_label, + "flamegraph": flamegraph_name, + "plots": [ + {"name": p, "path": f"{plots_rel_path}/{p}"} + for p in sorted(plot_files) + ], + } + ) - return "" + return graphs def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: """Copy flamegraphs and plots to output directory.""" @@ -807,8 +615,6 @@ def __init__( ): nightly_history: NightlyHistory | None = None if nightly_history_file and nightly_history_file.exists(): - from bench.nightly import NightlyHistory - nightly_history = NightlyHistory(nightly_history_file) self.generator = ReportGenerator(repo_url, nightly_history) diff --git a/bench/templates/base.html b/bench/templates/base.html new file mode 100644 index 000000000000..d5f5f1ad5c57 --- /dev/null +++ b/bench/templates/base.html @@ -0,0 +1,14 @@ + + + + + + {% block title %}Benchcoin{% endblock %} + + {% block head %}{% endblock %} + + + {% block content %}{% endblock %} + {% block scripts %}{% endblock %} + + diff --git a/bench/templates/nightly-chart.html b/bench/templates/nightly-chart.html new file mode 100644 index 000000000000..2642df69d987 --- /dev/null +++ b/bench/templates/nightly-chart.html @@ -0,0 +1,202 @@ +{% extends 'base.html' %} + +{% block title %}Bitcoin Core Nightly IBD Benchmark{% endblock %} + +{% block head %} + + +{% endblock %} + +{% block body_class %}p-4 md:p-8{% endblock %} + +{% block content %} +{% include 'partials/theme-toggle.html' %} +
    +

    Bitcoin Core Nightly IBD Benchmark

    +

    + IBD from a single networked peer +

    +
    +
    +
    +

    + View PR benchmark results +

    +
    +{% endblock %} + +{% block scripts %} + +{% endblock %} diff --git a/bench/templates/partials/pr-chart.html b/bench/templates/partials/pr-chart.html new file mode 100644 index 000000000000..bef27e4a7b22 --- /dev/null +++ b/bench/templates/partials/pr-chart.html @@ -0,0 +1,129 @@ +
    + + diff --git a/bench/templates/partials/theme-toggle.html b/bench/templates/partials/theme-toggle.html new file mode 100644 index 000000000000..e0082e1a3958 --- /dev/null +++ b/bench/templates/partials/theme-toggle.html @@ -0,0 +1,25 @@ + + + diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html new file mode 100644 index 000000000000..f6b37b8a43e1 --- /dev/null +++ b/bench/templates/pr-report.html @@ -0,0 +1,114 @@ +{% extends 'base.html' %} + +{% block title %}Benchmark Results{% endblock %} + +{% block content %} +
    +

    Benchmark Results

    +
    +

    {{ title }}

    + +

    Run Data

    +
    + + + + + + + + + + + + {% for run in runs %} + + + + + + + + {% endfor %} + +
    ConfigMean (s)Std DevUser (s)System (s)
    {{ run.config_display }}{{ "%.3f"|format(run.mean) }}{{ "%.3f"|format(run.stddev) if run.stddev else "N/A" }}{{ "%.3f"|format(run.user) }}{{ "%.3f"|format(run.system) }}
    +
    + + {% if nightly_comparison %} +

    Comparison to Nightly Master

    +
    + + + + + + + + + + + {% for config, data in nightly_comparison|dictsort %} + + + + + + + {% endfor %} + +
    ConfigPR TimeNightly Time (Date)Change
    {{ data.config_display }}{{ "%.1f"|format(data.pr_mean / 60) }} min + {% if data.nightly_mean %} + {{ "%.1f"|format(data.nightly_mean / 60) }} min + ({{ data.nightly_date }}{% if data.nightly_commit %}, + {{ data.nightly_commit[:7] }}{% endif %}) + {% else %} + No baseline + {% endif %} + + {% if data.speedup_percent is not none %} + {% if data.speedup_percent > 0 %} + +{{ data.speedup_percent }}% + {% elif data.speedup_percent < 0 %} + {{ data.speedup_percent }}% + {% else %} + {{ data.speedup_percent }}% + {% endif %} + {% else %} + N/A + {% endif %} +
    +
    + + {% if pr_chart_data %} +

    Performance Trend

    +
    + {% include 'partials/pr-chart.html' %} +
    + {% endif %} + + {% else %} +
    +

    No nightly baseline data available for comparison.

    +
    + {% endif %} + + {% if graphs %} +

    Flamegraphs and Plots

    + {% for graph in graphs %} +
    +

    {{ graph.label }}

    + {% if graph.flamegraph %} + + {% endif %} + {% for plot in graph.plots %} + + {{ plot.name }} + + {% endfor %} +
    + {% endfor %} + {% endif %} + +
    +
    +{% endblock %} diff --git a/bench/templates/results-index.html b/bench/templates/results-index.html new file mode 100644 index 000000000000..3dc580b173a0 --- /dev/null +++ b/bench/templates/results-index.html @@ -0,0 +1,23 @@ +{% extends 'base.html' %} + +{% block title %}Bitcoin Benchmark Results{% endblock %} + +{% block content %} +
    +

    Bitcoin Benchmark Results

    +
    +

    Available Results

    +
      + {% for pr_num, pr_runs in results %} +
    • PR #{{ pr_num }} + +
    • + {% endfor %} +
    +
    +
    +{% endblock %} diff --git a/bench/utils.py b/bench/utils.py index df454cf0644e..11d1c0cb18b2 100644 --- a/bench/utils.py +++ b/bench/utils.py @@ -69,8 +69,6 @@ def restore(self) -> None: class GitError(Exception): """Git operation failed.""" - pass - def git_checkout(commit: str, repo_path: Path | None = None) -> None: """Checkout a specific commit.""" diff --git a/flake.nix b/flake.nix index b42180629d1a..40a9e1d4ab68 100644 --- a/flake.nix +++ b/flake.nix @@ -157,6 +157,7 @@ pkgs.perf pkgs.perf-tools pkgs.python312 + pkgs.python312Packages.jinja2 pkgs.python312Packages.matplotlib pkgs.util-linux From c6ce7d968dfdf36e98b17a675733219b5e42640f Mon Sep 17 00:00:00 2001 From: will Date: Mon, 12 Jan 2026 15:10:00 +0000 Subject: [PATCH 12/46] use commit date in chart data points --- .github/workflows/nightly-benchmark.yml | 26 +++++++++++++++++++------ bench.py | 6 ++++++ bench/nightly.py | 20 +++++++++++++++---- 3 files changed, 42 insertions(+), 10 deletions(-) diff --git a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml index 61109672bb45..273bbf89f6d4 100644 --- a/.github/workflows/nightly-benchmark.yml +++ b/.github/workflows/nightly-benchmark.yml @@ -26,8 +26,13 @@ jobs: BITCOIN_SHA=$(git merge-base HEAD upstream/master) echo "BITCOIN_SHA=$BITCOIN_SHA" >> "$GITHUB_ENV" + # Get commit date for the Bitcoin commit (for chart X-axis) + COMMIT_DATE=$(git log -1 --format=%cd --date=short "$BITCOIN_SHA") + echo "COMMIT_DATE=$COMMIT_DATE" >> "$GITHUB_ENV" + echo "Benchcoin: $(git rev-parse HEAD)" echo "Bitcoin merge-base: $BITCOIN_SHA" + echo "Commit date: $COMMIT_DATE" - name: Build master binary run: | @@ -44,10 +49,13 @@ jobs: - name: Upload commit info run: | echo "$BITCOIN_SHA" > ${{ runner.temp }}/commit.txt + echo "$COMMIT_DATE" > ${{ runner.temp }}/commit-date.txt - uses: actions/upload-artifact@v4 with: name: commit-info - path: ${{ runner.temp }}/commit.txt + path: | + ${{ runner.temp }}/commit.txt + ${{ runner.temp 
}}/commit-date.txt - name: Capture machine specs run: | @@ -151,9 +159,13 @@ jobs: with: python-version: '3.12' - - name: Get current date + - name: Get dates run: | - echo "DATE=$(date -u +%Y-%m-%d)" >> "$GITHUB_ENV" + # Commit date (for chart X-axis) + COMMIT_DATE=$(cat ./commit-info/commit-date.txt) + echo "COMMIT_DATE=$COMMIT_DATE" >> "$GITHUB_ENV" + # Run date (for reference) + echo "RUN_DATE=$(date -u +%Y-%m-%d)" >> "$GITHUB_ENV" - name: Append results to history run: | @@ -167,7 +179,8 @@ jobs: ../nightly-450-results/results.json \ "$COMMIT" \ 450 \ - --date "$DATE" \ + --date "$COMMIT_DATE" \ + --run-date "$RUN_DATE" \ --benchmark-config bench/configs/nightly.toml \ --machine-specs ../machine-specs/machine-specs.json @@ -178,7 +191,8 @@ jobs: ../nightly-32000-results/results.json \ "$COMMIT" \ 32000 \ - --date "$DATE" \ + --date "$COMMIT_DATE" \ + --run-date "$RUN_DATE" \ --benchmark-config bench/configs/nightly.toml \ --machine-specs ../machine-specs/machine-specs.json @@ -195,5 +209,5 @@ jobs: git config --global user.name "github-actions[bot]" git config --global user.email "github-actions[bot]@users.noreply.github.com" git add nightly-history.json index.html - git commit -m "Update nightly benchmark results for $DATE" || echo "No changes to commit" + git commit -m "Update nightly benchmark results for $COMMIT_DATE" || echo "No changes to commit" git push origin gh-pages diff --git a/bench.py b/bench.py index 2991969e57ea..da1904ee591f 100755 --- a/bench.py +++ b/bench.py @@ -323,6 +323,7 @@ def cmd_nightly(args: argparse.Namespace) -> int: benchmark_config_file=benchmark_config_file, instrumentation=args.instrumentation, machine_specs_file=machine_specs_file, + run_date=args.run_date or "", ) logger.info(f"Appended result to {history_file}") elif args.nightly_command == "chart": @@ -566,6 +567,11 @@ def main() -> int: metavar="PATH", help="Path to pre-captured machine specs JSON (default: detect current machine)", ) + nightly_append.add_argument( + 
"--run-date", + metavar="YYYY-MM-DD", + help="Date when benchmark was executed (default: today). Stored for reference.", + ) # nightly chart nightly_chart = nightly_subparsers.add_parser( diff --git a/bench/nightly.py b/bench/nightly.py index 80faf966dd84..362fd697abd4 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -138,7 +138,7 @@ def series_label(result: "NightlyResult") -> str: class NightlyResult: """A single nightly benchmark result with embedded config and machine info.""" - date: str + date: str # Commit date (YYYY-MM-DD) - displayed on chart X-axis commit: str mean: float stddev: float @@ -147,6 +147,7 @@ class NightlyResult: str, Any ] # Full benchmark config (dbcache inside config.bitcoind.dbcache) machine: dict[str, Any] # Full machine specs + run_date: str = "" # When benchmark was executed (reference only) @property def dbcache(self) -> int: @@ -168,7 +169,7 @@ def instrumentation(self) -> str: def to_dict(self) -> dict[str, Any]: """Convert to dictionary for JSON serialization.""" - return { + result = { "date": self.date, "commit": self.commit, "mean": self.mean, @@ -177,6 +178,9 @@ def to_dict(self) -> dict[str, Any]: "config": self.config, "machine": self.machine, } + if self.run_date: + result["run_date"] = self.run_date + return result @classmethod def from_dict(cls, data: dict[str, Any]) -> NightlyResult: @@ -194,6 +198,7 @@ def from_dict(cls, data: dict[str, Any]) -> NightlyResult: runs=data["runs"], config=data["config"], machine=data.get("machine", {}), + run_date=data.get("run_date", ""), ) # Legacy format - convert to new format @@ -209,6 +214,7 @@ def from_dict(cls, data: dict[str, Any]) -> NightlyResult: "instrumentation": "uninstrumented", }, machine={}, + run_date=data.get("run_date", ""), ) @@ -316,6 +322,7 @@ def append_from_results_json( benchmark_config: dict[str, Any], machine_specs: dict[str, Any], date_str: str | None = None, + run_date: str = "", ) -> None: """Append result from a hyperfine results.json file. 
@@ -324,7 +331,8 @@ def append_from_results_json( commit: Git commit hash benchmark_config: Full benchmark config dict (includes bitcoind.dbcache) machine_specs: Machine specs dict - date_str: Date string (YYYY-MM-DD), defaults to today + date_str: Commit date string (YYYY-MM-DD), defaults to today + run_date: When the benchmark was executed (YYYY-MM-DD), for reference """ if not results_file.exists(): raise FileNotFoundError(f"Results file not found: {results_file}") @@ -355,6 +363,7 @@ def append_from_results_json( runs=runs, config=benchmark_config, machine=machine_specs, + run_date=run_date, ) self.append(result) @@ -391,6 +400,7 @@ def append( benchmark_config_file: Path | None = None, instrumentation: str = "uninstrumented", machine_specs_file: Path | None = None, + run_date: str = "", ) -> None: """Append a result from hyperfine results.json to history. @@ -398,10 +408,11 @@ def append( results_file: Path to hyperfine results.json commit: Git commit hash dbcache: DB cache size in MB - date_str: Date string (YYYY-MM-DD), defaults to today + date_str: Commit date string (YYYY-MM-DD), defaults to today benchmark_config_file: Path to benchmark config TOML instrumentation: Instrumentation mode ('uninstrumented' or 'instrumented') machine_specs_file: Path to pre-captured machine specs JSON (optional) + run_date: When the benchmark was executed (YYYY-MM-DD), for reference """ from bench.benchmark_config import BenchmarkConfig @@ -435,6 +446,7 @@ def append( benchmark_config=config_dict, machine_specs=machine_specs, date_str=date_str, + run_date=run_date, ) history.save() From 9bf5428f7f9ab960e25a3192c2e56223555b44b6 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 13 Jan 2026 08:53:00 +0000 Subject: [PATCH 13/46] use nix flake in both publish workflow steps --- .github/workflows/nightly-benchmark.yml | 34 +++++++++++-------------- .github/workflows/publish-results.yml | 22 +++++++--------- 2 files changed, 24 insertions(+), 32 deletions(-) diff --git 
a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml index 273bbf89f6d4..3ff6a930369b 100644 --- a/.github/workflows/nightly-benchmark.yml +++ b/.github/workflows/nightly-benchmark.yml @@ -154,10 +154,8 @@ jobs: name: machine-specs path: ./machine-specs - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: '3.12' + - name: Install Nix + uses: cachix/install-nix-action@v31 - name: Get dates run: | @@ -170,39 +168,37 @@ jobs: - name: Append results to history run: | COMMIT=$(cat ./commit-info/commit.txt) - cd benchcoin-tools # Append 450 (default dbcache) result - python3 bench.py nightly \ - --history-file ../nightly-history.json \ + nix develop ./benchcoin-tools --command python3 benchcoin-tools/bench.py nightly \ + --history-file ./nightly-history.json \ append \ - ../nightly-450-results/results.json \ + ./nightly-450-results/results.json \ "$COMMIT" \ 450 \ --date "$COMMIT_DATE" \ --run-date "$RUN_DATE" \ - --benchmark-config bench/configs/nightly.toml \ - --machine-specs ../machine-specs/machine-specs.json + --benchmark-config benchcoin-tools/bench/configs/nightly.toml \ + --machine-specs ./machine-specs/machine-specs.json # Append 32000 (large dbcache) result - python3 bench.py nightly \ - --history-file ../nightly-history.json \ + nix develop ./benchcoin-tools --command python3 benchcoin-tools/bench.py nightly \ + --history-file ./nightly-history.json \ append \ - ../nightly-32000-results/results.json \ + ./nightly-32000-results/results.json \ "$COMMIT" \ 32000 \ --date "$COMMIT_DATE" \ --run-date "$RUN_DATE" \ - --benchmark-config bench/configs/nightly.toml \ - --machine-specs ../machine-specs/machine-specs.json + --benchmark-config benchcoin-tools/bench/configs/nightly.toml \ + --machine-specs ./machine-specs/machine-specs.json - name: Generate chart run: | - cd benchcoin-tools - python3 bench.py nightly \ - --history-file ../nightly-history.json \ + nix develop ./benchcoin-tools --command python3 
benchcoin-tools/bench.py nightly \ + --history-file ./nightly-history.json \ chart \ - ../index.html + ./index.html - name: Commit and push to gh-pages run: | diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index 1c26113c28f7..cf4a32032c1a 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -81,10 +81,8 @@ jobs: fi done - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: '3.12' + - name: Install Nix + uses: cachix/install-nix-action@v31 - name: Generate report id: generate @@ -93,28 +91,26 @@ jobs: RUN_ID: ${{ steps.metadata.outputs.run-id }} HEAD_SHA: ${{ steps.metadata.outputs.head-sha }} run: | - cd benchcoin-tools - # Build network arguments NETWORK_ARGS="" for network in ${NETWORKS//,/ }; do - if [ -d "../${network}-results" ]; then - NETWORK_ARGS="${NETWORK_ARGS} --network ${network}:../${network}-results" + if [ -d "./${network}-results" ]; then + NETWORK_ARGS="${NETWORK_ARGS} --network ${network}:./${network}-results" fi done # Generate report with nightly comparison (use per-machine history file) - python3 bench.py report \ + nix develop ./benchcoin-tools --command python3 benchcoin-tools/bench.py report \ ${NETWORK_ARGS} \ --pr-number "${PR_NUMBER}" \ --run-id "${RUN_ID}" \ --commit "${HEAD_SHA}" \ - --nightly-history "../nightly-history-${MACHINE_ID}.json" \ + --nightly-history "./nightly-history-${MACHINE_ID}.json" \ --update-index \ - "../results/pr-${PR_NUMBER}/${RUN_ID}" + "./results/pr-${PR_NUMBER}/${RUN_ID}" # Build comparison summary for PR comment - if [ -f "../nightly-history-${MACHINE_ID}.json" ]; then + if [ -f "./nightly-history-${MACHINE_ID}.json" ]; then COMPARISON=$(jq -r ' if .nightly_comparison then .nightly_comparison | to_entries | map( @@ -130,7 +126,7 @@ jobs: ) | join("\n- ") else "No comparison data available" end - ' "../results/pr-${PR_NUMBER}/${RUN_ID}/results.json") + ' 
"./results/pr-${PR_NUMBER}/${RUN_ID}/results.json") else COMPARISON="No nightly history available for comparison" fi From bbffd5ac78c47952e3a9020f3a7010c08562034e Mon Sep 17 00:00:00 2001 From: will Date: Tue, 13 Jan 2026 17:15:39 +0000 Subject: [PATCH 14/46] fix nightly-history mismatch --- .github/workflows/publish-results.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index cf4a32032c1a..e074f0582c2f 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -14,8 +14,6 @@ jobs: env: # Matrix entries from configs/pr.toml: dbcache=[450,32000] x instrumentation=[uninstrumented,instrumented] NETWORKS: "450-uninstrumented,450-instrumented,32000-uninstrumented,32000-instrumented" - # Machine ID for nightly history file (PR benchmarks run on x64 runners) - MACHINE_ID: "amd64" outputs: comparison: ${{ steps.generate.outputs.comparison }} pr-number: ${{ steps.metadata.outputs.pr-number }} @@ -105,12 +103,12 @@ jobs: --pr-number "${PR_NUMBER}" \ --run-id "${RUN_ID}" \ --commit "${HEAD_SHA}" \ - --nightly-history "./nightly-history-${MACHINE_ID}.json" \ + --nightly-history "./nightly-history.json" \ --update-index \ "./results/pr-${PR_NUMBER}/${RUN_ID}" # Build comparison summary for PR comment - if [ -f "./nightly-history-${MACHINE_ID}.json" ]; then + if [ -f "./nightly-history.json" ]; then COMPARISON=$(jq -r ' if .nightly_comparison then .nightly_comparison | to_entries | map( From 2a7228263c2c830b15dc303165ff88a427fa288b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 13 Jan 2026 17:22:46 +0000 Subject: [PATCH 15/46] fix instrumented suffixes in reports --- bench/report.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bench/report.py b/bench/report.py index 1941dce55af6..db11d613fbef 100644 --- a/bench/report.py +++ b/bench/report.py @@ -360,16 +360,16 @@ def _calculate_nightly_comparison( 
logger.warning("No nightly history available for comparison") return comparison - # Group runs by network/config, only uninstrumented (no '-true' suffix) + # Group runs by network/config, only uninstrumented for run in runs: network = run.network # Skip instrumented configs - if network.endswith("-true"): + if network.endswith("-true") or network.endswith("-instrumented"): continue - # Extract base config name (e.g., "450-false" -> "450") - config = network.replace("-false", "") + # Extract base config name (e.g., "450-false" -> "450", "450-uninstrumented" -> "450") + config = network.replace("-false", "").replace("-uninstrumented", "") # Get PR result mean pr_mean = run.mean From 626d33bb421d96c7914b53a1b7f31af630521a35 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 13 Jan 2026 20:39:11 +0000 Subject: [PATCH 16/46] add clickable plotly links --- bench/templates/nightly-chart.html | 5 +++++ bench/templates/partials/pr-chart.html | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/bench/templates/nightly-chart.html b/bench/templates/nightly-chart.html index 2642df69d987..0a7cfb177e60 100644 --- a/bench/templates/nightly-chart.html +++ b/bench/templates/nightly-chart.html @@ -197,6 +197,11 @@

    Bitcoin Core Nightly IBD Benchmark

    Plotly.react('nightly-chart', buildTraces(), getLayout(theme), config); } + document.getElementById('nightly-chart').on('plotly_click', function(data) { + const commit = data.points[0].customdata[0]; + window.open('https://github.com/bitcoin/bitcoin/commit/' + commit, '_blank'); + }); + setTheme(getPreferredTheme()); {% endblock %} diff --git a/bench/templates/partials/pr-chart.html b/bench/templates/partials/pr-chart.html index bef27e4a7b22..b13ebffcf8c5 100644 --- a/bench/templates/partials/pr-chart.html +++ b/bench/templates/partials/pr-chart.html @@ -126,4 +126,9 @@ }; Plotly.newPlot('pr-comparison-chart', buildTraces(), layout, config); + + document.getElementById('pr-comparison-chart').on('plotly_click', function(data) { + const commit = data.points[0].customdata[0]; + window.open('https://github.com/bitcoin/bitcoin/commit/' + commit, '_blank'); + }); From 0823859980cc8ee1c4a3454769d1bf9ff5347452 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 13 Jan 2026 20:39:24 +0000 Subject: [PATCH 17/46] use corect path in index --- bench/templates/results-index.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bench/templates/results-index.html b/bench/templates/results-index.html index 3dc580b173a0..5a39cbee3672 100644 --- a/bench/templates/results-index.html +++ b/bench/templates/results-index.html @@ -12,7 +12,7 @@

    Available Results

  • PR #{{ pr_num }}
  • From 8a60e6707a72e3ad2cc926994c5f8f9daf99a414 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 09:20:34 +0000 Subject: [PATCH 18/46] use scatter plot for leveldb compaction --- bench/analyze.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bench/analyze.py b/bench/analyze.py index baedd97d745c..c4af916a8d68 100644 --- a/bench/analyze.py +++ b/bench/analyze.py @@ -425,7 +425,7 @@ def _plot( return plt.figure(figsize=(30, 10)) - plt.plot(x, y) + plt.scatter(x, y, alpha=0.6, s=20) plt.title(title, fontsize=20) plt.xlabel(x_label, fontsize=16) plt.ylabel(y_label, fontsize=16) From c02c9635496510de0a0737a826154d7e78573067 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 09:30:51 +0000 Subject: [PATCH 19/46] add debug logs to artifacts --- .github/workflows/benchmark.yml | 7 +++++ .github/workflows/nightly-benchmark.yml | 7 +++++ .github/workflows/publish-results.yml | 5 ++++ bench/benchmark.py | 34 ++++++++++++------------- bench/report.py | 24 ++++++++++++++++- bench/templates/pr-report.html | 7 +++++ 6 files changed, 66 insertions(+), 18 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 8cc02422ce8f..438926844fdb 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -83,6 +83,13 @@ jobs: path: ${{ runner.temp }}/output/*-flamegraph.svg if-no-files-found: ignore + - name: Upload debug logs + uses: actions/upload-artifact@v4 + with: + name: debug-logs-${{ matrix.name }} + path: ${{ runner.temp }}/output/*-debug.log + if-no-files-found: ignore + - name: Write context metadata env: GITHUB_CONTEXT: ${{ toJSON(github) }} diff --git a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml index 3ff6a930369b..ea88c2ebde0c 100644 --- a/.github/workflows/nightly-benchmark.yml +++ b/.github/workflows/nightly-benchmark.yml @@ -113,6 +113,13 @@ jobs: name: result-nightly-${{ matrix.name }} path: ${{ runner.temp 
}}/output/results.json + - name: Upload debug logs + uses: actions/upload-artifact@v4 + with: + name: debug-logs-nightly-${{ matrix.name }} + path: ${{ runner.temp }}/output/*-debug.log + if-no-files-found: ignore + publish: needs: benchmark runs-on: ubuntu-latest diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index e074f0582c2f..36788cb6729c 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -55,6 +55,11 @@ jobs: cp -r "pngs-${network}"/* "${network}-results/plots/" 2>/dev/null || true fi + # Copy debug logs into network results directory + if [ -d "debug-logs-${network}" ]; then + cp -r "debug-logs-${network}"/* "${network}-results/" 2>/dev/null || true + fi + # Keep metadata separate for extraction if [ -d "run-metadata-${network}" ]; then mkdir -p "${network}-metadata" diff --git a/bench/benchmark.py b/bench/benchmark.py index 3c289b4f6628..465470ba7f5f 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -165,18 +165,18 @@ def run( name=name, ) - # For instrumented runs, collect flamegraph and debug log + # Collect debug log (all runs) + debug_log_file = output_dir / f"{name}-debug.log" + if debug_log_file.exists(): + result.debug_log = debug_log_file + logger.info(f"Collected debug log: {debug_log_file}") + + # For instrumented runs, also collect flamegraph if self.is_instrumented: - logger.info("Collecting instrumented artifacts...") flamegraph_file = output_dir / f"{name}-flamegraph.svg" - debug_log_file = output_dir / f"{name}-debug.log" - if flamegraph_file.exists(): result.flamegraph = flamegraph_file - logger.info(f" Flamegraph: {flamegraph_file}") - if debug_log_file.exists(): - result.debug_log = debug_log_file - logger.info(f" Debug log: {debug_log_file}") + logger.info(f"Collected flamegraph: {flamegraph_file}") # Clean up tmp_datadir if tmp_datadir.exists(): @@ -313,10 +313,9 @@ def _build_hyperfine_cmd( # Build the actual command to benchmark 
bitcoind_cmd = self._build_bitcoind_cmd(binary_path, tmp_datadir) - # For instrumented runs, append the conclude logic - if self.is_instrumented: - conclude = self._create_conclude_commands(name, tmp_datadir, output_dir) - bitcoind_cmd += f" && {conclude}" + # Append conclude logic (debug.log for all, flamegraph for instrumented) + conclude = self._create_conclude_commands(name, tmp_datadir, output_dir) + bitcoind_cmd += f" && {conclude}" cmd.append(bitcoind_cmd) @@ -331,12 +330,13 @@ def _create_conclude_commands( """Create inline conclude commands for the binary.""" commands = [] - # Move flamegraph if exists - commands.append( - f'if [ -e flamegraph.svg ]; then mv flamegraph.svg "{output_dir}/{name}-flamegraph.svg"; fi' - ) + # Move flamegraph if exists (instrumented only) + if self.is_instrumented: + commands.append( + f'if [ -e flamegraph.svg ]; then mv flamegraph.svg "{output_dir}/{name}-flamegraph.svg"; fi' + ) - # Copy debug log if exists + # Copy debug log if exists (all runs) commands.append( f'debug_log=$(find "{tmp_datadir}" -name debug.log -print -quit); ' f'if [ -n "$debug_log" ]; then cp "$debug_log" "{output_dir}/{name}-debug.log"; fi' diff --git a/bench/report.py b/bench/report.py index db11d613fbef..2976f3a365a1 100644 --- a/bench/report.py +++ b/bench/report.py @@ -428,6 +428,12 @@ def _copy_network_artifacts( shutil.copytree(plots_dir, dest_plots) logger.debug(f"Copied plots to {dest_plots}") + # Copy debug logs with network prefix + for log in input_dir.glob("*-debug.log"): + dest = output_dir / f"{network}-{log.name}" + shutil.copy2(log, dest) + logger.debug(f"Copied {log.name} as {dest.name}") + def _generate_html( self, runs: list[BenchmarkRun], @@ -563,7 +569,16 @@ def _prepare_graphs_data( if p.name.startswith(f"{name}-") and p.suffix == ".png" ] - if not flamegraph_name and not plot_files: + debug_log_name = None + network_prefixed_log = f"{network}-{name}-debug.log" + non_prefixed_log = f"{name}-debug.log" + + if (output_dir / 
network_prefixed_log).exists(): + debug_log_name = network_prefixed_log + elif (input_dir / non_prefixed_log).exists(): + debug_log_name = non_prefixed_log + + if not flamegraph_name and not plot_files and not debug_log_name: continue display_label = f"{network} - {name}" if network != "default" else name @@ -573,6 +588,7 @@ def _prepare_graphs_data( { "label": display_label, "flamegraph": flamegraph_name, + "debug_log": debug_log_name, "plots": [ {"name": p, "path": f"{plots_rel_path}/{p}"} for p in sorted(plot_files) @@ -604,6 +620,12 @@ def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: shutil.copytree(plots_dir, dest_plots) logger.debug("Copied plots directory") + # Copy debug logs + for log in input_dir.glob("*-debug.log"): + dest = output_dir / log.name + shutil.copy2(log, dest) + logger.debug(f"Copied {log.name}") + class ReportPhase: """Generate reports from benchmark results.""" diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html index f6b37b8a43e1..197908ed1e12 100644 --- a/bench/templates/pr-report.html +++ b/bench/templates/pr-report.html @@ -105,6 +105,13 @@

    {{ graph.label }}

    {{ plot.name }} {% endfor %} + {% if graph.debug_log %} + + {% endif %}
    {% endfor %} {% endif %} From 747e76f3f320226595972099df992fd4901b78fd Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 09:40:29 +0000 Subject: [PATCH 20/46] dynamic charts test --- .github/workflows/benchmark.yml | 7 - .github/workflows/publish-results.yml | 6 - bench/report.py | 75 +--- bench/templates/base.html | 2 + .../templates/partials/debug-log-charts.html | 344 ++++++++++++++++++ bench/templates/pr-report.html | 14 +- 6 files changed, 372 insertions(+), 76 deletions(-) create mode 100644 bench/templates/partials/debug-log-charts.html diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 438926844fdb..730ab434ee14 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -69,13 +69,6 @@ jobs: name: result-${{ matrix.name }} path: ${{ runner.temp }}/output/results.json - - name: Upload plots - uses: actions/upload-artifact@v4 - with: - name: pngs-${{ matrix.name }} - path: ${{ runner.temp }}/output/plots/*.png - if-no-files-found: ignore - - name: Upload flamegraphs uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index 36788cb6729c..59dd6369741c 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -49,12 +49,6 @@ jobs: cp -r "flamegraph-${network}"/* "${network}-results/" 2>/dev/null || true fi - # Copy plots into network results directory - if [ -d "pngs-${network}" ]; then - mkdir -p "${network}-results/plots" - cp -r "pngs-${network}"/* "${network}-results/plots/" 2>/dev/null || true - fi - # Copy debug logs into network results directory if [ -d "debug-logs-${network}" ]; then cp -r "debug-logs-${network}"/* "${network}-results/" 2>/dev/null || true diff --git a/bench/report.py b/bench/report.py index 2976f3a365a1..e80c912e3c9f 100644 --- a/bench/report.py +++ b/bench/report.py @@ -5,6 +5,7 @@ from __future__ import annotations +import gzip import json 
import logging import shutil @@ -419,20 +420,13 @@ def _copy_network_artifacts( shutil.copy2(svg, dest) logger.debug(f"Copied {svg.name} as {dest.name}") - # Copy plots directory with network prefix - plots_dir = input_dir / "plots" - if plots_dir.exists(): - dest_plots = output_dir / f"{network}-plots" - if dest_plots.exists(): - shutil.rmtree(dest_plots) - shutil.copytree(plots_dir, dest_plots) - logger.debug(f"Copied plots to {dest_plots}") - - # Copy debug logs with network prefix + # Gzip and copy debug logs with network prefix for log in input_dir.glob("*-debug.log"): - dest = output_dir / f"{network}-{log.name}" - shutil.copy2(log, dest) - logger.debug(f"Copied {log.name} as {dest.name}") + dest = output_dir / f"{network}-{log.name}.gz" + with open(log, "rb") as f_in: + with gzip.open(dest, "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + logger.debug(f"Compressed {log.name} as {dest.name}") def _generate_html( self, @@ -533,7 +527,7 @@ def _prepare_graphs_data( input_dir: Path, output_dir: Path, ) -> list[dict]: - """Prepare flamegraphs and plots data for template rendering.""" + """Prepare flamegraphs and debug logs data for template rendering.""" graphs = [] for run in runs: @@ -549,57 +543,35 @@ def _prepare_graphs_data( elif (input_dir / non_prefixed).exists(): flamegraph_name = non_prefixed - plot_files = [] - plots_dir = None - network_plots_dir = output_dir / f"{network}-plots" - regular_plots_dir = input_dir / "plots" - - if network_plots_dir.exists(): - plots_dir = network_plots_dir - plot_files = [ - p.name - for p in plots_dir.iterdir() - if p.name.startswith(f"{name}-") and p.suffix == ".png" - ] - elif regular_plots_dir.exists(): - plots_dir = regular_plots_dir - plot_files = [ - p.name - for p in plots_dir.iterdir() - if p.name.startswith(f"{name}-") and p.suffix == ".png" - ] - debug_log_name = None + network_prefixed_log_gz = f"{network}-{name}-debug.log.gz" network_prefixed_log = f"{network}-{name}-debug.log" non_prefixed_log = 
f"{name}-debug.log" - if (output_dir / network_prefixed_log).exists(): + if (output_dir / network_prefixed_log_gz).exists(): + debug_log_name = network_prefixed_log_gz + elif (output_dir / network_prefixed_log).exists(): debug_log_name = network_prefixed_log elif (input_dir / non_prefixed_log).exists(): debug_log_name = non_prefixed_log - if not flamegraph_name and not plot_files and not debug_log_name: + if not flamegraph_name and not debug_log_name: continue display_label = f"{network} - {name}" if network != "default" else name - plots_rel_path = plots_dir.name if plots_dir else "" graphs.append( { "label": display_label, "flamegraph": flamegraph_name, "debug_log": debug_log_name, - "plots": [ - {"name": p, "path": f"{plots_rel_path}/{p}"} - for p in sorted(plot_files) - ], } ) return graphs def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: - """Copy flamegraphs and plots to output directory.""" + """Copy flamegraphs and gzip debug logs to output directory.""" # Skip if input and output are the same directory if input_dir.resolve() == output_dir.resolve(): logger.debug("Input and output are the same directory, skipping copy") @@ -611,20 +583,13 @@ def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: shutil.copy2(svg, dest) logger.debug(f"Copied {svg.name}") - # Copy plots directory - plots_dir = input_dir / "plots" - if plots_dir.exists(): - dest_plots = output_dir / "plots" - if dest_plots.exists(): - shutil.rmtree(dest_plots) - shutil.copytree(plots_dir, dest_plots) - logger.debug("Copied plots directory") - - # Copy debug logs + # Gzip and copy debug logs for log in input_dir.glob("*-debug.log"): - dest = output_dir / log.name - shutil.copy2(log, dest) - logger.debug(f"Copied {log.name}") + dest = output_dir / f"{log.name}.gz" + with open(log, "rb") as f_in: + with gzip.open(dest, "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + logger.debug(f"Compressed {log.name} as {dest.name}") class ReportPhase: diff --git 
a/bench/templates/base.html b/bench/templates/base.html index d5f5f1ad5c57..22df32c8c2d5 100644 --- a/bench/templates/base.html +++ b/bench/templates/base.html @@ -5,6 +5,8 @@ {% block title %}Benchcoin{% endblock %} + + {% block head %}{% endblock %} diff --git a/bench/templates/partials/debug-log-charts.html b/bench/templates/partials/debug-log-charts.html new file mode 100644 index 000000000000..c4f7e475bb0e --- /dev/null +++ b/bench/templates/partials/debug-log-charts.html @@ -0,0 +1,344 @@ +
    +
    Loading charts from debug.log...
    +
    + + diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html index 197908ed1e12..56e9eac19664 100644 --- a/bench/templates/pr-report.html +++ b/bench/templates/pr-report.html @@ -93,24 +93,22 @@

    Performance Trend

    {% endif %} {% if graphs %} -

    Flamegraphs and Plots

    +

    Flamegraphs and Charts

    {% for graph in graphs %}

    {{ graph.label }}

    {% if graph.flamegraph %} {% endif %} - {% for plot in graph.plots %} - - {{ plot.name }} - - {% endfor %} {% if graph.debug_log %} -
    + + {% set chart_id = graph.label | replace(' ', '-') | replace('.', '') | lower %} + {% set debug_log_url = graph.debug_log %} + {% include 'partials/debug-log-charts.html' %} {% endif %}
    {% endfor %} From f95908c1e85860d2a980b23c21261ca34d8aeecd Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 09:41:30 +0000 Subject: [PATCH 21/46] fix theme render order --- bench/templates/nightly-chart.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bench/templates/nightly-chart.html b/bench/templates/nightly-chart.html index 0a7cfb177e60..c3732fab14b6 100644 --- a/bench/templates/nightly-chart.html +++ b/bench/templates/nightly-chart.html @@ -197,11 +197,11 @@

    Bitcoin Core Nightly IBD Benchmark

    Plotly.react('nightly-chart', buildTraces(), getLayout(theme), config); } + setTheme(getPreferredTheme()); + document.getElementById('nightly-chart').on('plotly_click', function(data) { const commit = data.points[0].customdata[0]; window.open('https://github.com/bitcoin/bitcoin/commit/' + commit, '_blank'); }); - - setTheme(getPreferredTheme()); {% endblock %} From 31b8170e8da3ed1bcadb140db93b6db406d97654 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 15:32:45 +0000 Subject: [PATCH 22/46] add ruff and ty to flake --- flake.nix | 300 +++++++++++++++++++++++++++--------------------------- 1 file changed, 149 insertions(+), 151 deletions(-) diff --git a/flake.nix b/flake.nix index 40a9e1d4ab68..a7a1917e4b1b 100644 --- a/flake.nix +++ b/flake.nix @@ -3,169 +3,167 @@ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11"; - outputs = - { self, nixpkgs }: - let - systems = [ - "x86_64-linux" - "aarch64-darwin" - ]; + outputs = { + self, + nixpkgs, + }: let + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; - forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); + forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); - pkgsFor = system: import nixpkgs { inherit system; }; + pkgsFor = system: import nixpkgs {inherit system;}; - mkBitcoinCore = - system: - let - pkgs = pkgsFor system; - inherit (pkgs) lib; + mkBitcoinCore = system: let + pkgs = pkgsFor system; + inherit (pkgs) lib; - pname = "bitcoin-core"; - version = self.shortRev or "dirty"; + pname = "bitcoin-core"; + version = self.shortRev or "dirty"; - CFlags = toString [ - "-O2" - "-g" - ]; - CXXFlags = "${CFlags} -fno-omit-frame-pointer"; + CFlags = toString [ + "-O2" + "-g" + ]; + CXXFlags = "${CFlags} -fno-omit-frame-pointer"; - nativeBuildInputs = [ - pkgs.cmake - pkgs.ninja - pkgs.pkg-config - pkgs.python3 - ]; + nativeBuildInputs = [ + pkgs.cmake + pkgs.ninja + pkgs.pkg-config + pkgs.python3 + ]; - buildInputs = [ - pkgs.boost188.dev - pkgs.libevent.dev - ]; + 
buildInputs = [ + pkgs.boost188.dev + pkgs.libevent.dev + ]; - cmakeFlags = [ - "-DBUILD_BENCH=OFF" - "-DBUILD_BITCOIN_BIN=OFF" - "-DBUILD_CLI=OFF" - "-DBUILD_DAEMON=ON" - "-DBUILD_FUZZ_BINARY=OFF" - "-DBUILD_GUI_TESTS=OFF" - "-DBUILD_TESTS=OFF" - "-DBUILD_TX=OFF" - "-DBUILD_UTIL=OFF" - "-DBUILD_WALLET_TOOL=OFF" - "-DCMAKE_BUILD_TYPE=RelWithDebInfo" - "-DCMAKE_SKIP_RPATH=ON" - "-DENABLE_EXTERNAL_SIGNER=OFF" - "-DENABLE_IPC=OFF" - "-DENABLE_WALLET=OFF" - "-DREDUCE_EXPORTS=ON" - "-DWITH_ZMQ=OFF" - ]; - in - pkgs.stdenv.mkDerivation { - inherit - pname - version - nativeBuildInputs - buildInputs - cmakeFlags - ; - - preConfigure = '' - cmakeFlagsArray+=( - "-DAPPEND_CFLAGS=${CFlags}" - "-DAPPEND_CXXFLAGS=${CXXFlags}" - "-DAPPEND_LDFLAGS=-Wl,--as-needed -Wl,-O2" - ) - ''; + cmakeFlags = [ + "-DBUILD_BENCH=OFF" + "-DBUILD_BITCOIN_BIN=OFF" + "-DBUILD_CLI=OFF" + "-DBUILD_DAEMON=ON" + "-DBUILD_FUZZ_BINARY=OFF" + "-DBUILD_GUI_TESTS=OFF" + "-DBUILD_TESTS=OFF" + "-DBUILD_TX=OFF" + "-DBUILD_UTIL=OFF" + "-DBUILD_WALLET_TOOL=OFF" + "-DCMAKE_BUILD_TYPE=RelWithDebInfo" + "-DCMAKE_SKIP_RPATH=ON" + "-DENABLE_EXTERNAL_SIGNER=OFF" + "-DENABLE_IPC=OFF" + "-DENABLE_WALLET=OFF" + "-DREDUCE_EXPORTS=ON" + "-DWITH_ZMQ=OFF" + ]; + in + pkgs.stdenv.mkDerivation { + inherit + pname + version + nativeBuildInputs + buildInputs + cmakeFlags + ; + + preConfigure = '' + cmakeFlagsArray+=( + "-DAPPEND_CFLAGS=${CFlags}" + "-DAPPEND_CXXFLAGS=${CXXFlags}" + "-DAPPEND_LDFLAGS=-Wl,--as-needed -Wl,-O2" + ) + ''; + + src = builtins.path { + path = ./.; + name = "source"; + }; - src = builtins.path { - path = ./.; - name = "source"; - }; + env = { + CMAKE_GENERATOR = "Ninja"; + LC_ALL = "C"; + LIBRARY_PATH = ""; + CPATH = ""; + C_INCLUDE_PATH = ""; + CPLUS_INCLUDE_PATH = ""; + OBJC_INCLUDE_PATH = ""; + OBJCPLUS_INCLUDE_PATH = ""; + }; + + dontStrip = true; - env = { - CMAKE_GENERATOR = "Ninja"; - LC_ALL = "C"; - LIBRARY_PATH = ""; - CPATH = ""; - C_INCLUDE_PATH = ""; - CPLUS_INCLUDE_PATH = ""; - 
OBJC_INCLUDE_PATH = ""; - OBJCPLUS_INCLUDE_PATH = ""; + meta = { + description = "bitcoind for benchmarking"; + homepage = "https://bitcoincore.org/"; + license = lib.licenses.mit; + }; + }; + in { + packages = forAllSystems (system: { + default = mkBitcoinCore system; + }); + + formatter = forAllSystems (system: (pkgsFor system).nixfmt-tree); + + devShells = forAllSystems ( + system: let + pkgs = pkgsFor system; + inherit (pkgs) stdenv; + + # Override the default cargo-flamegraph with a custom fork including bitcoin highlighting + cargo-flamegraph = pkgs.rustPlatform.buildRustPackage rec { + pname = "flamegraph"; + version = "bitcoin-core"; + + src = pkgs.fetchFromGitHub { + owner = "willcl-ark"; + repo = "flamegraph"; + rev = "bitcoin-core"; + sha256 = "sha256-tQbr3MYfAiOxeT12V9au5KQK5X5JeGuV6p8GR/Sgen4="; }; - dontStrip = true; + doCheck = false; + cargoHash = "sha256-QWPqTyTFSZNJNayNqLmsQSu0rX26XBKfdLROZ9tRjrg="; - meta = { - description = "bitcoind for benchmarking"; - homepage = "https://bitcoincore.org/"; - license = lib.licenses.mit; - }; + nativeBuildInputs = pkgs.lib.optionals stdenv.hostPlatform.isLinux [pkgs.makeWrapper]; + buildInputs = pkgs.lib.optionals stdenv.hostPlatform.isDarwin [ + pkgs.darwin.apple_sdk.frameworks.Security + ]; + + postFixup = pkgs.lib.optionalString stdenv.hostPlatform.isLinux '' + wrapProgram $out/bin/cargo-flamegraph \ + --set-default PERF ${pkgs.perf}/bin/perf + wrapProgram $out/bin/flamegraph \ + --set-default PERF ${pkgs.perf}/bin/perf + ''; }; - in - { - packages = forAllSystems (system: { - default = mkBitcoinCore system; - }); - - formatter = forAllSystems (system: (pkgsFor system).nixfmt-tree); - - devShells = forAllSystems ( - system: - let - pkgs = pkgsFor system; - inherit (pkgs) stdenv; - - # Override the default cargo-flamegraph with a custom fork including bitcoin highlighting - cargo-flamegraph = pkgs.rustPlatform.buildRustPackage rec { - pname = "flamegraph"; - version = "bitcoin-core"; - - src = 
pkgs.fetchFromGitHub { - owner = "willcl-ark"; - repo = "flamegraph"; - rev = "bitcoin-core"; - sha256 = "sha256-tQbr3MYfAiOxeT12V9au5KQK5X5JeGuV6p8GR/Sgen4="; - }; - - doCheck = false; - cargoHash = "sha256-QWPqTyTFSZNJNayNqLmsQSu0rX26XBKfdLROZ9tRjrg="; - - nativeBuildInputs = pkgs.lib.optionals stdenv.hostPlatform.isLinux [ pkgs.makeWrapper ]; - buildInputs = pkgs.lib.optionals stdenv.hostPlatform.isDarwin [ - pkgs.darwin.apple_sdk.frameworks.Security - ]; - - postFixup = pkgs.lib.optionalString stdenv.hostPlatform.isLinux '' - wrapProgram $out/bin/cargo-flamegraph \ - --set-default PERF ${pkgs.perf}/bin/perf - wrapProgram $out/bin/flamegraph \ - --set-default PERF ${pkgs.perf}/bin/perf - ''; - }; - in - { - default = pkgs.mkShell { - buildInputs = [ - # Benchmarking - cargo-flamegraph - pkgs.flamegraph - pkgs.hyperfine - pkgs.jq - pkgs.just - pkgs.perf - pkgs.perf-tools - pkgs.python312 - pkgs.python312Packages.jinja2 - pkgs.python312Packages.matplotlib - pkgs.util-linux - - # Binary patching - pkgs.patchelf - ]; - }; - } - ); - }; + in { + default = pkgs.mkShell { + buildInputs = [ + # Benchmarking + cargo-flamegraph + pkgs.flamegraph + pkgs.hyperfine + pkgs.jq + pkgs.just + pkgs.perf + pkgs.perf-tools + pkgs.python312 + pkgs.python312Packages.jinja2 + pkgs.python312Packages.matplotlib + pkgs.ruff + pkgs.ty + pkgs.util-linux + + # Binary patching + pkgs.patchelf + ]; + }; + } + ); + }; } From dbe1874e0fe497c32314672b668cef744b2db53f Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 15:35:06 +0000 Subject: [PATCH 23/46] add ty.toml --- bench/benchmark.py | 1 + ty.toml | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 ty.toml diff --git a/bench/benchmark.py b/bench/benchmark.py index 465470ba7f5f..f6e8b696a666 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -104,6 +104,7 @@ def run( # Setup directories output_dir.mkdir(parents=True, exist_ok=True) + assert self.config.tmp_datadir is not None tmp_datadir = 
Path(self.config.tmp_datadir) tmp_datadir.mkdir(parents=True, exist_ok=True) diff --git a/ty.toml b/ty.toml new file mode 100644 index 000000000000..b208f32ade1e --- /dev/null +++ b/ty.toml @@ -0,0 +1,2 @@ +[src] +include = ["bench.py", "bench/*.py"] From d4fc44db4fbf9926f37b92053e9336c6bf76292c Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 15:35:30 +0000 Subject: [PATCH 24/46] add ruff.toml --- ruff.toml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 ruff.toml diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 000000000000..35d385e82ac3 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,2 @@ +target-version = "py312" +include = ["bench.py", "bench/*.py"] From 4658fb0deb0335c81f97d3577c08fe49c573a3b2 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 14 Jan 2026 15:30:51 +0000 Subject: [PATCH 25/46] support a full IBD PR run --- bench.py | 5 ++++- bench/benchmark_config.py | 11 ++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/bench.py b/bench.py index da1904ee591f..6dc371a13d27 100755 --- a/bench.py +++ b/bench.py @@ -105,9 +105,12 @@ def cmd_run(args: argparse.Namespace) -> int: return 1 logger.info(f"Using matrix entry: {matrix_entry}") + # In full IBD mode, ignore datadir (sync from genesis) + datadir = None if benchmark_config.full_ibd else args.datadir + # Build config with CLI args and benchmark config values cli_args: dict = { - "datadir": args.datadir, + "datadir": datadir, "tmp_datadir": args.tmp_datadir, "output_dir": args.output_dir, "no_cache_drop": args.no_cache_drop, diff --git a/bench/benchmark_config.py b/bench/benchmark_config.py index 37da1aaa1a71..783d2b639b54 100644 --- a/bench/benchmark_config.py +++ b/bench/benchmark_config.py @@ -25,6 +25,7 @@ class BenchmarkConfig: """ # Benchmark metadata + full_ibd: bool start_height: int runs: int @@ -78,8 +79,13 @@ def from_toml(cls, path: Path) -> BenchmarkConfig: instrumented = bitcoind.pop("instrumented", {}) instrumented_debug = instrumented.get("debug", 
[]) + # Full IBD mode: skip datadir copy, sync from genesis + full_ibd = benchmark.get("full_ibd", False) + start_height = 0 if full_ibd else benchmark.get("start_height", 0) + config = cls( - start_height=benchmark.get("start_height", 0), + full_ibd=full_ibd, + start_height=start_height, runs=benchmark.get("runs", 3), matrix=matrix, bitcoind_args=bitcoind, @@ -88,6 +94,8 @@ def from_toml(cls, path: Path) -> BenchmarkConfig: ) logger.info(f"Loaded benchmark config from {path}") + if config.full_ibd: + logger.info(" Mode: Full IBD (fresh sync from genesis)") logger.info(f" Start height: {config.start_height}, Runs: {config.runs}") if config.matrix: logger.info(f" Matrix parameters: {list(config.matrix.keys())}") @@ -217,6 +225,7 @@ def to_dict(self) -> dict[str, Any]: This captures the config for logging with results. """ result: dict[str, Any] = { + "full_ibd": self.full_ibd, "start_height": self.start_height, "runs": self.runs, "command_template": self.generate_command_template(), From e1d867e1df3f77c0e399cb66023d93d919594a3a Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 12:17:08 +0000 Subject: [PATCH 26/46] Generate static debug.log plots during report generation Run LogParser + PlotGenerator from bench/analyze.py during artifact copying to produce static PNG charts from debug.log files. This pre-generates the same 11 chart types that were previously rendered client-side via JavaScript. Changes to report.py: - Import HAS_MATPLOTLIB, LogParser, PlotGenerator from bench.analyze - _copy_network_artifacts: generate plots after each debug.log with "{network}-{name}" prefix (e.g. 
"450-uninstrumented-pr") - _copy_artifacts: generate plots for single-directory mode, including when input_dir == output_dir - _prepare_graphs_data: add "plots" key with relative paths to PNGs - generate(): reorder to copy artifacts before HTML rendering so _prepare_graphs_data can find the generated plot files Plot generation is guarded by HAS_MATPLOTLIB for graceful fallback when matplotlib is unavailable. --- bench/report.py | 73 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 19 deletions(-) diff --git a/bench/report.py b/bench/report.py index e80c912e3c9f..2276ca40cca8 100644 --- a/bench/report.py +++ b/bench/report.py @@ -14,6 +14,7 @@ from pathlib import Path from typing import Any +from bench.analyze import HAS_MATPLOTLIB, LogParser, PlotGenerator from bench.nightly import ( NightlyHistory, series_color_index, @@ -263,6 +264,9 @@ def generate( # Parse results runs = self._parse_results(data) + # Copy artifacts first so plots are available for template rendering + self._copy_artifacts(input_dir, output_dir) + # Generate HTML (no nightly comparison in single-directory mode) html = self._generate_html(runs, {}, title, input_dir, output_dir) @@ -271,9 +275,6 @@ def generate( index_file.write_text(html) logger.info(f"Generated report: {index_file}") - # Copy artifacts (flamegraphs, plots) - self._copy_artifacts(input_dir, output_dir) - return ReportResult( output_dir=output_dir, index_file=index_file, @@ -428,6 +429,20 @@ def _copy_network_artifacts( shutil.copyfileobj(f_in, f_out) logger.debug(f"Compressed {log.name} as {dest.name}") + if HAS_MATPLOTLIB: + name = log.name.removesuffix("-debug.log") + prefix = f"{network}-{name}" + plots_dir = output_dir / "plots" + plots_dir.mkdir(parents=True, exist_ok=True) + try: + data = LogParser().parse_file(log) + plots = PlotGenerator(prefix, plots_dir).generate_all(data) + logger.info(f"Generated {len(plots)} plots for {prefix}") + except Exception: + logger.warning( + f"Failed to 
generate plots for {prefix}", exc_info=True + ) + def _generate_html( self, runs: list[BenchmarkRun], @@ -555,7 +570,16 @@ def _prepare_graphs_data( elif (input_dir / non_prefixed_log).exists(): debug_log_name = non_prefixed_log - if not flamegraph_name and not debug_log_name: + plots = [] + plots_dir = output_dir / "plots" + if plots_dir.exists(): + for prefix in [f"{network}-{name}", name]: + plot_files = sorted(plots_dir.glob(f"{prefix}-*.png")) + if plot_files: + plots = [f"plots/{p.name}" for p in plot_files] + break + + if not flamegraph_name and not debug_log_name and not plots: continue display_label = f"{network} - {name}" if network != "default" else name @@ -565,6 +589,7 @@ def _prepare_graphs_data( "label": display_label, "flamegraph": flamegraph_name, "debug_log": debug_log_name, + "plots": plots, } ) @@ -572,24 +597,34 @@ def _prepare_graphs_data( def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: """Copy flamegraphs and gzip debug logs to output directory.""" - # Skip if input and output are the same directory - if input_dir.resolve() == output_dir.resolve(): - logger.debug("Input and output are the same directory, skipping copy") - return + same_dir = input_dir.resolve() == output_dir.resolve() - # Copy flamegraphs - for svg in input_dir.glob("*-flamegraph.svg"): - dest = output_dir / svg.name - shutil.copy2(svg, dest) - logger.debug(f"Copied {svg.name}") + if not same_dir: + for svg in input_dir.glob("*-flamegraph.svg"): + dest = output_dir / svg.name + shutil.copy2(svg, dest) + logger.debug(f"Copied {svg.name}") - # Gzip and copy debug logs for log in input_dir.glob("*-debug.log"): - dest = output_dir / f"{log.name}.gz" - with open(log, "rb") as f_in: - with gzip.open(dest, "wb") as f_out: - shutil.copyfileobj(f_in, f_out) - logger.debug(f"Compressed {log.name} as {dest.name}") + if not same_dir: + dest = output_dir / f"{log.name}.gz" + with open(log, "rb") as f_in: + with gzip.open(dest, "wb") as f_out: + 
shutil.copyfileobj(f_in, f_out) + logger.debug(f"Compressed {log.name} as {dest.name}") + + if HAS_MATPLOTLIB: + name = log.name.removesuffix("-debug.log") + plots_dir = output_dir / "plots" + plots_dir.mkdir(parents=True, exist_ok=True) + try: + data = LogParser().parse_file(log) + plots = PlotGenerator(name, plots_dir).generate_all(data) + logger.info(f"Generated {len(plots)} plots for {name}") + except Exception: + logger.warning( + f"Failed to generate plots for {name}", exc_info=True + ) class ReportPhase: From e7f83170f909b9f7982f3f3f94d11d6d731989d5 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 12:20:43 +0000 Subject: [PATCH 27/46] Replace client-side debug.log charts with static images The pr-report.html template previously included debug-log-charts.html which fetched multi-hundred-MB debug.log.gz files in the browser, decompressed them with pako.js, parsed every line, and rendered 11 Plotly charts client-side. This made report pages unresponsive. Now that report.py pre-generates the charts as static PNGs: - pr-report.html: replace the debug-log-charts.html include with an img loop over graph.plots, using loading="lazy" - debug-log-charts.html: delete (344 lines of client-side JS) - base.html: remove pako.js and Plotly CDN scripts (both are independently included by pr-chart.html and nightly-chart.html via their own script tags) The debug.log download link is preserved. 
--- bench/templates/base.html | 2 - .../templates/partials/debug-log-charts.html | 344 ------------------ bench/templates/pr-report.html | 8 +- 3 files changed, 5 insertions(+), 349 deletions(-) delete mode 100644 bench/templates/partials/debug-log-charts.html diff --git a/bench/templates/base.html b/bench/templates/base.html index 22df32c8c2d5..d5f5f1ad5c57 100644 --- a/bench/templates/base.html +++ b/bench/templates/base.html @@ -5,8 +5,6 @@ {% block title %}Benchcoin{% endblock %} - - {% block head %}{% endblock %} diff --git a/bench/templates/partials/debug-log-charts.html b/bench/templates/partials/debug-log-charts.html deleted file mode 100644 index c4f7e475bb0e..000000000000 --- a/bench/templates/partials/debug-log-charts.html +++ /dev/null @@ -1,344 +0,0 @@ -
    -
    Loading charts from debug.log...
    -
    - - diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html index 56e9eac19664..05ecf1fa706e 100644 --- a/bench/templates/pr-report.html +++ b/bench/templates/pr-report.html @@ -106,9 +106,11 @@

    {{ graph.label }}

    Download debug.log
    - {% set chart_id = graph.label | replace(' ', '-') | replace('.', '') | lower %} - {% set debug_log_url = graph.debug_log %} - {% include 'partials/debug-log-charts.html' %} + {% endif %} + {% if graph.plots %} + {% for plot in graph.plots %} + {{ graph.label }} + {% endfor %} {% endif %} {% endfor %} From 0d4d79334a401d83bb45409c96c3c058b20e7b3f Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 20:45:32 +0000 Subject: [PATCH 28/46] Update bench/README.md to reflect current CLI interface Rewrite to document the TOML config + matrix entry workflow, removing stale references to the old two-commit comparison CLI, --datadir requirement, profiles, and BENCH_DATADIR env var. --- bench/README.md | 206 ++++++++++++++++++++---------------------------- 1 file changed, 86 insertions(+), 120 deletions(-) diff --git a/bench/README.md b/bench/README.md index ca0d011303de..1e959e463fa7 100644 --- a/bench/README.md +++ b/bench/README.md @@ -1,23 +1,25 @@ # Benchcoin -A CLI for benchmarking Bitcoin Core IBD. +A CLI for benchmarking Bitcoin Core IBD (Initial Block Download). ## Quick Start ```bash # Quick smoke test on signet (requires nix) -nix develop --command python3 bench.py --profile quick full \ - --chain signet --datadir /path/to/signet/datadir HEAD~1 HEAD +nix develop --command python3 bench.py build HEAD:test +nix develop --command python3 bench.py run \ + --benchmark-config bench/configs/test-signet.toml \ + --matrix-entry 450 \ + --output-dir ./output \ + test:./binaries/test/bitcoind -# Or use just (wraps nix develop) -just quick HEAD~1 HEAD /path/to/signet/datadir +# Or use just +just test-uninstrumented HEAD ``` ## Requirements - **Nix** with flakes enabled (provides hyperfine, flamegraph, etc.) 
-- A blockchain datadir snapshot to benchmark against -- Two git commits to compare Optional (auto-detected, gracefully degrades without): - `/run/wrappers/bin/drop-caches` (NixOS) - clears page cache between runs @@ -34,42 +36,41 @@ Global Options: --dry-run Show what would run Commands: - build Build bitcoind at two commits - run Run benchmark (requires pre-built binaries) + build Build bitcoind at a commit + run Run benchmark (requires pre-built binary + TOML config) analyze Generate plots from debug.log report Generate HTML report - full Complete pipeline: build β†’ run β†’ analyze + nightly Manage nightly history + generate chart ``` ### build -Build bitcoind binaries at two commits for comparison: +Build a bitcoind binary at a commit: ```bash -python3 bench.py build HEAD~1 HEAD -python3 bench.py build --binaries-dir /tmp/bins abc123 def456 -python3 bench.py build --skip-existing HEAD~1 HEAD # reuse existing +python3 bench.py build HEAD:pr +python3 bench.py build -o /tmp/bins abc123:test +python3 bench.py build --skip-existing HEAD:pr ``` ### run -Run hyperfine benchmark comparing two pre-built binaries: +Run a benchmark using a TOML config and matrix entry: ```bash -python3 bench.py run --datadir /data/snapshot HEAD~1 HEAD -python3 bench.py run --instrumented --datadir /data/snapshot HEAD~1 HEAD +python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry 450-uninstrumented \ + --output-dir ./output \ + pr:./binaries/pr/bitcoind ``` Options: -- `--datadir PATH` - Source blockchain snapshot (required) -- `--tmp-datadir PATH` - Working directory (default: ./bench-output/tmp-datadir) -- `--stop-height N` - Block height to sync to -- `--dbcache N` - Database cache in MB -- `--runs N` - Number of iterations (default: 3, forced to 1 if instrumented) -- `--instrumented` - Enable flamegraph profiling and debug logging -- `--connect ADDR` - P2P node to sync from (empty = public network) -- `--chain {main,signet,testnet,regtest}` - Which chain 
-- `--no-cache-drop` - Don't clear page cache between runs +- `--benchmark-config PATH` - TOML config file (required) +- `--matrix-entry NAME` - Matrix entry to run (required) +- `--tmp-datadir PATH` - Working directory for benchmark runs +- `-o, --output-dir PATH` - Output directory for results +- `--no-cache-drop` - Skip clearing page cache between runs ### analyze @@ -79,95 +80,75 @@ Generate plots from a debug.log file: python3 bench.py analyze abc123 /path/to/debug.log --output-dir ./plots ``` -Generates PNG plots for: -- Block height vs time -- Cache size vs height/time -- Transaction count vs height -- LevelDB compaction events -- CoinDB write batches +Generates PNG plots for block processing rate, cache usage, transaction counts, LevelDB compaction, and CoinDB write batches. ### report -Generate HTML report from benchmark results: +Generate an HTML report from benchmark results: ```bash +# Single directory python3 bench.py report ./bench-output ./report + +# Multi-network (CI mode) +python3 bench.py report \ + --network 450-uninstrumented:./results/450 \ + --network 32000-uninstrumented:./results/32000 \ + --nightly-history ./nightly-history.json \ + --pr-number 123 --run-id abc \ + ./output ``` -### full +### nightly -Run complete pipeline (build + run + analyze if instrumented): +Manage nightly benchmark history: ```bash -python3 bench.py --profile quick full --chain signet --datadir /tmp/signet HEAD~1 HEAD -python3 bench.py --profile full full --datadir /data/mainnet HEAD~1 HEAD -``` - -## Profiles +# Append a result +python3 bench.py nightly --history-file history.json append \ + results.json COMMIT 450 \ + --benchmark-config bench/configs/nightly.toml \ + --machine-specs machine-specs.json -Profiles set sensible defaults for common scenarios: +# Generate chart +python3 bench.py nightly --history-file history.json chart index.html +``` -| Profile | stop_height | runs | dbcache | connect | -|---------|-------------|------|---------|---------| -| quick | 
1,500 | 1 | 450 | (public network) | -| full | 855,000 | 3 | 450 | (public network) | -| ci | 855,000 | 3 | 450 | 148.251.128.115:33333 | +## Benchmark Configs -Override any profile setting with CLI flags: +Benchmarks are driven by TOML config files in `bench/configs/`: -```bash -python3 bench.py --profile quick full --stop-height 5000 --datadir ... HEAD~1 HEAD -``` +| File | Chain | Matrix Entries | Use Case | +|------|-------|----------------|----------| +| `pr.toml` | mainnet | 450/32000 x uninstrumented/instrumented | PR comparison | +| `nightly.toml` | mainnet | 450, 32000 | Nightly baseline | +| `test-signet.toml` | signet | 450 | Quick local smoke test | -## Configuration +All configs use `full_ibd = true` (sync from genesis) with `runs = 1`. -Configuration is layered (lowest to highest priority): +### Matrix Expansion -1. Built-in defaults -2. `bench.toml` (in repo root) -3. Environment variables (`BENCH_DATADIR`, `BENCH_DBCACHE`, etc.) -4. CLI arguments - -### bench.toml +The `[bitcoind.matrix]` section defines parameter axes. Their cartesian product generates named entries: ```toml -[defaults] -chain = "main" -dbcache = 450 -stop_height = 855000 -runs = 3 - -[paths] -binaries_dir = "./binaries" -output_dir = "./bench-output" - -[profiles.quick] -stop_height = 1500 -runs = 1 -dbcache = 450 - -[profiles.ci] -connect = "148.251.128.115:33333" +[bitcoind.matrix] +dbcache = [450, 32000] +instrumentation = ["uninstrumented", "instrumented"] +# Produces: 450-uninstrumented, 450-instrumented, 32000-uninstrumented, 32000-instrumented ``` -### Environment Variables - -```bash -export BENCH_DATADIR=/data/snapshot -export BENCH_DBCACHE=1000 -export BENCH_STOP_HEIGHT=100000 -``` +Select one with `--matrix-entry`. 
## Justfile Recipes -The justfile wraps common operations with `nix develop`: - ```bash -just quick HEAD~1 HEAD /path/to/datadir # Quick signet test -just full HEAD~1 HEAD /path/to/datadir # Full mainnet benchmark -just instrumented HEAD~1 HEAD /path/to/datadir # With flamegraphs -just build HEAD~1 HEAD # Build only -just run HEAD~1 HEAD /path/to/datadir # Run only (binaries must exist) +just test-instrumented HEAD # Signet smoke test with flamegraphs +just test-uninstrumented HEAD # Signet smoke test without profiling +just instrumented HEAD # Full instrumented benchmark +just build HEAD:pr # Build only +just run pr:./binaries/pr/bitcoind # Run with pre-built binary +just analyze COMMIT debug.log ./plots +just report ./input ./output --nightly-history ./nightly-history.json ``` ## Architecture @@ -176,59 +157,44 @@ just run HEAD~1 HEAD /path/to/datadir # Run only (binaries must exist) bench.py CLI entry point (argparse) bench/ β”œβ”€β”€ config.py Layered configuration (TOML + env + CLI) +β”œβ”€β”€ benchmark_config.py TOML config loader + matrix expansion β”œβ”€β”€ capabilities.py System capability detection β”œβ”€β”€ build.py Build phase (nix build) β”œβ”€β”€ benchmark.py Benchmark phase (hyperfine) β”œβ”€β”€ analyze.py Plot generation (matplotlib) β”œβ”€β”€ report.py HTML report generation +β”œβ”€β”€ nightly.py Nightly history + chart generation └── utils.py Git operations, datadir management ``` -### Capability Detection - -The tool auto-detects system capabilities and gracefully degrades: - -```python -from bench.capabilities import detect_capabilities -caps = detect_capabilities() -# caps.has_hyperfine, caps.can_drop_caches, etc. -``` - -Missing optional features emit warnings but don't fail: - -``` -WARNING: drop-caches not available - cache won't be cleared between runs -``` - -Missing required features (hyperfine, flamegraph for instrumented) cause errors. 
- ### Hyperfine Integration -The benchmark phase generates temporary shell scripts for hyperfine hooks: +The benchmark phase generates shell scripts for hyperfine hooks: -- `setup` - Clean tmp datadir (once before all runs) -- `prepare` - Copy snapshot, drop caches, clean logs (before each run) -- `cleanup` - Clean tmp datadir (after all runs per command) -- `conclude` - Collect flamegraph/logs (instrumented only, after each run) +- `setup` - Create tmp datadir (once before all runs) +- `prepare` - Create fresh datadir, drop caches, clean logs (before each run) +- `cleanup` - Clean tmp datadir (after all runs) +- `conclude` - Collect flamegraph/logs (instrumented only) ### Instrumented Mode -When `--instrumented` is set: +When `instrumentation = "instrumented"` in the matrix: 1. Wraps bitcoind in `flamegraph` for CPU profiling -2. Enables debug logging: `-debug=coindb -debug=leveldb -debug=bench -debug=validation` +2. Enables debug logging: `coindb`, `leveldb`, `bench`, `validation` 3. Forces `runs=1` (profiling overhead makes multiple runs pointless) 4. Generates flamegraph SVGs and performance plots ## CI Integration -GitHub Actions workflows call bench.py directly (already in nix develop): +GitHub Actions workflows call bench.py directly: ```yaml - run: | - nix develop --command python3 bench.py build \ - --binaries-dir ${{ runner.temp }}/binaries \ - $BASE_SHA $HEAD_SHA + nix develop --command python3 bench.py run \ + --benchmark-config bench/configs/pr.toml \ + --matrix-entry ${{ matrix.name }} \ + --tmp-datadir ${{ runner.temp }}/datadir \ + --output-dir ${{ runner.temp }}/output \ + pr:${{ runner.temp }}/binaries/pr/bitcoind ``` - -CI-specific paths and the dedicated sync node are configured via `--profile ci`. 
From e3a17a7cacf9dc159420c9197f42435400704761 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 20:58:46 +0000 Subject: [PATCH 29/46] Stop publishing debug.log.gz to gh-pages, link to CI artifacts instead Debug logs were consuming 388MB on gh-pages. They are already uploaded as CI artifacts with 90-day retention during benchmark runs. - Remove gzip compression and copying of debug logs in report generation - Remove debug log extraction in publish-results workflow - Replace per-graph "Download debug.log" links with a single link to the CI run page where artifacts can be downloaded - Keep matplotlib plot generation from debug logs (plots are still generated during report phase, just the raw logs aren't published) --- .github/workflows/publish-results.yml | 5 --- bench/report.py | 48 ++++++++------------------- bench/templates/pr-report.html | 13 ++++---- 3 files changed, 19 insertions(+), 47 deletions(-) diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index 59dd6369741c..d3d364199c1b 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -49,11 +49,6 @@ jobs: cp -r "flamegraph-${network}"/* "${network}-results/" 2>/dev/null || true fi - # Copy debug logs into network results directory - if [ -d "debug-logs-${network}" ]; then - cp -r "debug-logs-${network}"/* "${network}-results/" 2>/dev/null || true - fi - # Keep metadata separate for extraction if [ -d "run-metadata-${network}" ]; then mkdir -p "${network}-metadata" diff --git a/bench/report.py b/bench/report.py index 2276ca40cca8..ce24335aeafb 100644 --- a/bench/report.py +++ b/bench/report.py @@ -5,7 +5,6 @@ from __future__ import annotations -import gzip import json import logging import shutil @@ -194,7 +193,8 @@ def generate_multi_network( # Generate HTML html = self._generate_html( - all_runs, nightly_comparison, full_title, output_dir, output_dir, commit + all_runs, nightly_comparison, full_title, output_dir, 
output_dir, commit, + run_id, ) # Write report @@ -421,15 +421,9 @@ def _copy_network_artifacts( shutil.copy2(svg, dest) logger.debug(f"Copied {svg.name} as {dest.name}") - # Gzip and copy debug logs with network prefix - for log in input_dir.glob("*-debug.log"): - dest = output_dir / f"{network}-{log.name}.gz" - with open(log, "rb") as f_in: - with gzip.open(dest, "wb") as f_out: - shutil.copyfileobj(f_in, f_out) - logger.debug(f"Compressed {log.name} as {dest.name}") - - if HAS_MATPLOTLIB: + # Generate plots from debug logs (logs themselves are available as CI artifacts) + if HAS_MATPLOTLIB: + for log in input_dir.glob("*-debug.log"): name = log.name.removesuffix("-debug.log") prefix = f"{network}-{name}" plots_dir = output_dir / "plots" @@ -451,6 +445,7 @@ def _generate_html( input_dir: Path, output_dir: Path, commit: str | None = None, + run_id: str | None = None, ) -> str: """Generate the HTML report.""" sorted_runs = sorted(runs, key=lambda r: r.network) @@ -481,6 +476,8 @@ def _generate_html( if pr_chart_data and self.nightly_history: nightly_chart_data = self.nightly_history.get_chart_data() + ci_run_url = f"{self.repo_url}/actions/runs/{run_id}" if run_id else None + return render_template( "pr-report.html", title=title, @@ -490,6 +487,7 @@ def _generate_html( nightly_chart_data=nightly_chart_data, graphs=graphs, repo_url=self.repo_url, + ci_run_url=ci_run_url, ) def _prepare_nightly_data( @@ -558,18 +556,6 @@ def _prepare_graphs_data( elif (input_dir / non_prefixed).exists(): flamegraph_name = non_prefixed - debug_log_name = None - network_prefixed_log_gz = f"{network}-{name}-debug.log.gz" - network_prefixed_log = f"{network}-{name}-debug.log" - non_prefixed_log = f"{name}-debug.log" - - if (output_dir / network_prefixed_log_gz).exists(): - debug_log_name = network_prefixed_log_gz - elif (output_dir / network_prefixed_log).exists(): - debug_log_name = network_prefixed_log - elif (input_dir / non_prefixed_log).exists(): - debug_log_name = non_prefixed_log 
- plots = [] plots_dir = output_dir / "plots" if plots_dir.exists(): @@ -579,7 +565,7 @@ def _prepare_graphs_data( plots = [f"plots/{p.name}" for p in plot_files] break - if not flamegraph_name and not debug_log_name and not plots: + if not flamegraph_name and not plots: continue display_label = f"{network} - {name}" if network != "default" else name @@ -588,7 +574,6 @@ def _prepare_graphs_data( { "label": display_label, "flamegraph": flamegraph_name, - "debug_log": debug_log_name, "plots": plots, } ) @@ -596,7 +581,7 @@ def _prepare_graphs_data( return graphs def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: - """Copy flamegraphs and gzip debug logs to output directory.""" + """Copy flamegraphs and generate plots from debug logs.""" same_dir = input_dir.resolve() == output_dir.resolve() if not same_dir: @@ -605,15 +590,8 @@ def _copy_artifacts(self, input_dir: Path, output_dir: Path) -> None: shutil.copy2(svg, dest) logger.debug(f"Copied {svg.name}") - for log in input_dir.glob("*-debug.log"): - if not same_dir: - dest = output_dir / f"{log.name}.gz" - with open(log, "rb") as f_in: - with gzip.open(dest, "wb") as f_out: - shutil.copyfileobj(f_in, f_out) - logger.debug(f"Compressed {log.name} as {dest.name}") - - if HAS_MATPLOTLIB: + if HAS_MATPLOTLIB: + for log in input_dir.glob("*-debug.log"): name = log.name.removesuffix("-debug.log") plots_dir = output_dir / "plots" plots_dir.mkdir(parents=True, exist_ok=True) diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html index 05ecf1fa706e..9ebfecbc2f34 100644 --- a/bench/templates/pr-report.html +++ b/bench/templates/pr-report.html @@ -94,19 +94,18 @@

    Performance Trend

    {% if graphs %}

    Flamegraphs and Charts

    + {% if ci_run_url %} +

    + Debug logs available as CI artifacts + (expires after 90 days) +

    + {% endif %} {% for graph in graphs %}

    {{ graph.label }}

    {% if graph.flamegraph %} {% endif %} - {% if graph.debug_log %} - - {% endif %} {% if graph.plots %} {% for plot in graph.plots %} {{ graph.label }} From 31d067f00acc003df73b6dc5010fd87774e31ba6 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 21:17:20 +0000 Subject: [PATCH 30/46] Wait for Pages deployment before commenting on PR The PR comment with result links was posted before GitHub Pages finished deploying, leading to broken links. Add a wait-for-pages job that polls for the pages-build-deployment run matching our exact gh-pages commit, then blocks until it completes. --- .github/workflows/publish-results.yml | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index d3d364199c1b..44558fdf1fca 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -18,6 +18,7 @@ jobs: comparison: ${{ steps.generate.outputs.comparison }} pr-number: ${{ steps.metadata.outputs.pr-number }} result-url: ${{ steps.generate.outputs.result-url }} + pages-commit: ${{ steps.push-pages.outputs.pages-commit }} steps: - uses: actions/checkout@v4 with: @@ -135,6 +136,7 @@ jobs: path: results - name: Commit and push to gh-pages + id: push-pages run: | git config --global user.name "github-actions[bot]" git config --global user.email "github-actions[bot]@users.noreply.github.com" @@ -143,10 +145,31 @@ jobs: git add results/ git commit -m "Update benchmark results from run ${{ github.event.workflow_run.id }}" git push origin gh-pages + echo "pages-commit=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT - comment-pr: + wait-for-pages: needs: build runs-on: ubuntu-latest + steps: + - name: Wait for GitHub Pages deployment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + COMMIT="${{ needs.build.outputs.pages-commit }}" + for i in $(seq 1 60); do + RUN_ID=$(gh run list -R "${{ github.repository }}" -w 
"pages-build-deployment" --commit "$COMMIT" -L 1 --json databaseId -q '.[0].databaseId') + if [ -n "$RUN_ID" ]; then + echo "Found pages-build-deployment run ${RUN_ID} for commit ${COMMIT}" + gh run watch "$RUN_ID" -R "${{ github.repository }}" + exit 0 + fi + sleep 5 + done + echo "::warning::Could not find pages-build-deployment run for commit ${COMMIT}" + + comment-pr: + needs: [build, wait-for-pages] + runs-on: ubuntu-latest permissions: pull-requests: write actions: read From 5c8794985a773ea3267b246157dc751bf66360eb Mon Sep 17 00:00:00 2001 From: will Date: Tue, 24 Feb 2026 21:23:53 +0000 Subject: [PATCH 31/46] Sort PR results index numerically instead of lexicographically --- bench/README.md | 23 ++++++++++++++--------- bench/report.py | 5 ++++- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/bench/README.md b/bench/README.md index 1e959e463fa7..e41bc7256f4b 100644 --- a/bench/README.md +++ b/bench/README.md @@ -10,16 +10,18 @@ nix develop --command python3 bench.py build HEAD:test nix develop --command python3 bench.py run \ --benchmark-config bench/configs/test-signet.toml \ --matrix-entry 450 \ + --datadir /path/to/signet-datadir \ --output-dir ./output \ test:./binaries/test/bitcoind # Or use just -just test-uninstrumented HEAD +just test-uninstrumented HEAD /path/to/signet-datadir ``` ## Requirements - **Nix** with flakes enabled (provides hyperfine, flamegraph, etc.) 
+- A blockchain datadir snapshot to benchmark against Optional (auto-detected, gracefully degrades without): - `/run/wrappers/bin/drop-caches` (NixOS) - clears page cache between runs @@ -61,6 +63,7 @@ Run a benchmark using a TOML config and matrix entry: python3 bench.py run \ --benchmark-config bench/configs/pr.toml \ --matrix-entry 450-uninstrumented \ + --datadir /data/pruned-840k \ --output-dir ./output \ pr:./binaries/pr/bitcoind ``` @@ -68,6 +71,7 @@ python3 bench.py run \ Options: - `--benchmark-config PATH` - TOML config file (required) - `--matrix-entry NAME` - Matrix entry to run (required) +- `--datadir PATH` - Blockchain datadir snapshot to copy for each run - `--tmp-datadir PATH` - Working directory for benchmark runs - `-o, --output-dir PATH` - Output directory for results - `--no-cache-drop` - Skip clearing page cache between runs @@ -124,7 +128,7 @@ Benchmarks are driven by TOML config files in `bench/configs/`: | `nightly.toml` | mainnet | 450, 32000 | Nightly baseline | | `test-signet.toml` | signet | 450 | Quick local smoke test | -All configs use `full_ibd = true` (sync from genesis) with `runs = 1`. +Configs use `start_height = 840000` (resuming from a pruned snapshot) with `runs = 2` (except signet which starts from 0 with `runs = 1`). ### Matrix Expansion @@ -142,11 +146,11 @@ Select one with `--matrix-entry`. 
## Justfile Recipes ```bash -just test-instrumented HEAD # Signet smoke test with flamegraphs -just test-uninstrumented HEAD # Signet smoke test without profiling -just instrumented HEAD # Full instrumented benchmark -just build HEAD:pr # Build only -just run pr:./binaries/pr/bitcoind # Run with pre-built binary +just test-instrumented HEAD /path/to/datadir # Signet smoke test with flamegraphs +just test-uninstrumented HEAD /path/to/datadir # Signet smoke test without profiling +just instrumented HEAD /path/to/datadir # Full instrumented benchmark +just build HEAD:pr # Build only +just run /path/to/datadir pr:./binaries/pr/bitcoind # Run with pre-built binary just analyze COMMIT debug.log ./plots just report ./input ./output --nightly-history ./nightly-history.json ``` @@ -171,8 +175,8 @@ bench/ The benchmark phase generates shell scripts for hyperfine hooks: -- `setup` - Create tmp datadir (once before all runs) -- `prepare` - Create fresh datadir, drop caches, clean logs (before each run) +- `setup` - Clean tmp datadir (once before all runs) +- `prepare` - Copy snapshot, drop caches, clean logs (before each run) - `cleanup` - Clean tmp datadir (after all runs) - `conclude` - Collect flamegraph/logs (instrumented only) @@ -194,6 +198,7 @@ GitHub Actions workflows call bench.py directly: nix develop --command python3 bench.py run \ --benchmark-config bench/configs/pr.toml \ --matrix-entry ${{ matrix.name }} \ + --datadir $ORIGINAL_DATADIR \ --tmp-datadir ${{ runner.temp }}/datadir \ --output-dir ${{ runner.temp }}/output \ pr:${{ runner.temp }}/binaries/pr/bitcoind diff --git a/bench/report.py b/bench/report.py index ce24335aeafb..2f4f4c26d74c 100644 --- a/bench/report.py +++ b/bench/report.py @@ -295,7 +295,10 @@ def generate_index( results = [] if results_dir.exists(): - for pr_dir in sorted(results_dir.iterdir()): + for pr_dir in sorted( + results_dir.iterdir(), + key=lambda d: int(d.name.replace("pr-", "")) if d.name.startswith("pr-") else 0, + ): if 
pr_dir.is_dir() and pr_dir.name.startswith("pr-"): pr_num = pr_dir.name.replace("pr-", "") pr_runs = [] From 4f210f3ba69c60913c3a9f47c7a692e79b98ebba Mon Sep 17 00:00:00 2001 From: will Date: Wed, 25 Feb 2026 08:41:54 +0000 Subject: [PATCH 32/46] set prune target to 1_000_000MB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, prune=10000 was causing flushes of the UTXO set when block pruning was taking place, resulting in logs like: ❯ zcat 32000-instrumented-pr-debug.log.gz | rg UTXO 2026-02-12T07:22:57Z * Using 31990.0 MiB for in-memory UTXO set (plus up to 286.1 MiB of unused mempool space) 2026-02-12T07:28:51Z [warning] Flushing large (2 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:33:10Z [warning] Flushing large (3 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:37:23Z [warning] Flushing large (4 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:42:03Z [warning] Flushing large (4 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:46:34Z [warning] Flushing large (5 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:51:10Z [warning] Flushing large (6 GiB) UTXO set to disk, it may take several minutes 2026-02-12T07:55:57Z [warning] Flushing large (7 GiB) UTXO set to disk, it may take several minutes 2026-02-12T08:00:35Z [warning] Flushing large (8 GiB) UTXO set to disk, it may take several minutes 2026-02-12T08:05:16Z [warning] Flushing large (8 GiB) UTXO set to disk, it may take several minutes 2026-02-12T08:10:00Z [warning] Flushing large (8 GiB) UTXO set to disk, it may take several minutes 2026-02-12T08:14:36Z [warning] Flushing large (8 GiB) UTXO set to disk, it may take several minutes 2026-02-12T08:16:47Z [warning] Flushing large (8 GiB) UTXO set to disk, it may take several minutes and generally interrupting benchmarking. Remove this effect by setting prune to such a high value it will never trigger.
Prune is **required** to permit us to continue syncing from a pruned datadir. --- bench/benchmark_config.py | 2 +- bench/configs/nightly.toml | 2 +- bench/configs/pr.toml | 2 +- bench/configs/test-signet.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bench/benchmark_config.py b/bench/benchmark_config.py index 783d2b639b54..c975c146f736 100644 --- a/bench/benchmark_config.py +++ b/bench/benchmark_config.py @@ -55,7 +55,7 @@ def from_toml(cls, path: Path) -> BenchmarkConfig: stopatheight = 855000 chain = "main" connect = "..." - prune = 10000 + prune = 1000000 daemon = false printtoconsole = false diff --git a/bench/configs/nightly.toml b/bench/configs/nightly.toml index 0780317c9022..c07a7f04e091 100644 --- a/bench/configs/nightly.toml +++ b/bench/configs/nightly.toml @@ -14,7 +14,7 @@ runs = 2 stopatheight = 900000 chain = "main" connect = "148.251.128.115:33333" # accepts whitelisted ip addrs only -prune = 10000 +prune = 1000000 daemon = false printtoconsole = false diff --git a/bench/configs/pr.toml b/bench/configs/pr.toml index d3ed08d8b30a..3513deba34ec 100644 --- a/bench/configs/pr.toml +++ b/bench/configs/pr.toml @@ -14,7 +14,7 @@ runs = 2 stopatheight = 900000 chain = "main" connect = "148.251.128.115:33333" -prune = 10000 +prune = 1000000 daemon = false printtoconsole = false diff --git a/bench/configs/test-signet.toml b/bench/configs/test-signet.toml index dc378fcf1d60..8e66414c64f3 100644 --- a/bench/configs/test-signet.toml +++ b/bench/configs/test-signet.toml @@ -13,7 +13,7 @@ runs = 1 [bitcoind] stopatheight = 10000 chain = "signet" -prune = 1000 +prune = 1000000 daemon = false printtoconsole = false From 9600279edcacd9df855b344f3845d6c212b37574 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 26 Feb 2026 14:38:31 +0000 Subject: [PATCH 33/46] Include prune in nightly chart series key --- bench/nightly.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bench/nightly.py b/bench/nightly.py index 
362fd697abd4..9da3645dd0a9 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -94,8 +94,8 @@ def _extract_cpu_short_name(cpu_model: str) -> str: def series_key(result: "NightlyResult") -> str: """Generate unique series key from machine specs and config. - Format: {cpu_short}|{ram}GB|{disk}|{kernel}|db{dbcache}|{start}-{stop} - Example: ryzen77008core|64GB|nvme|6.6|db450|840000-900000 + Format: {cpu_short}|{ram}GB|{disk}|{kernel}|db{dbcache}|prune{prune}|{start}-{stop} + Example: ryzen77008core|64GB|nvme|6.6|db450|prune1000000|840000-900000 """ machine = result.machine or {} config = result.config or {} @@ -107,10 +107,11 @@ def series_key(result: "NightlyResult") -> str: kernel = _normalize_kernel(machine.get("os_kernel", "unknown")) dbcache = bitcoind.get("dbcache", result.dbcache) + prune = bitcoind.get("prune", 0) start = config.get("start_height", 0) stop = bitcoind.get("stopatheight", 0) - return f"{cpu}|{ram}GB|{disk}|{kernel}|db{dbcache}|{start}-{stop}" + return f"{cpu}|{ram}GB|{disk}|{kernel}|db{dbcache}|prune{prune}|{start}-{stop}" def series_label(result: "NightlyResult") -> str: @@ -130,8 +131,10 @@ def series_label(result: "NightlyResult") -> str: block_range = f"{start}-{stop}" if start and stop else "?-?" 
dbcache = result.dbcache + prune = bitcoind.get("prune", 0) - return f"{arch}, {cpu_short}, {ram_str}, {block_range}, dbcache {dbcache}" + prune_str = f", prune {prune}" if prune else "" + return f"{arch}, {cpu_short}, {ram_str}, {block_range}, dbcache {dbcache}{prune_str}" @dataclass From afe90d31ad2f03a8608c018b7727e314f232814a Mon Sep 17 00:00:00 2001 From: will Date: Thu, 26 Feb 2026 14:53:09 +0000 Subject: [PATCH 34/46] Fix numeric sort crash on pr-main directory --- bench/report.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bench/report.py b/bench/report.py index 2f4f4c26d74c..02c28846a4cb 100644 --- a/bench/report.py +++ b/bench/report.py @@ -297,7 +297,9 @@ def generate_index( if results_dir.exists(): for pr_dir in sorted( results_dir.iterdir(), - key=lambda d: int(d.name.replace("pr-", "")) if d.name.startswith("pr-") else 0, + key=lambda d: (0, int(d.name.replace("pr-", ""))) + if d.name.startswith("pr-") and d.name.replace("pr-", "").isdigit() + else (1, d.name), ): if pr_dir.is_dir() and pr_dir.name.startswith("pr-"): pr_num = pr_dir.name.replace("pr-", "") From d16cb612366e1fb107d78f4004c60f6e11ffe5d4 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Sat, 28 Feb 2026 20:01:42 +0000 Subject: [PATCH 35/46] Restore debug log extraction for PNG plot generation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ec9395419a removed debug log copying to stop publishing raw logs to gh-pages (388MB). But it also removed the extraction step that makes debug logs available during report generation, so matplotlib had no input files and PNG charts silently stopped appearing in PR reports. Restore the copy of debug-logs-${network} artifacts into the results directory before report generation. The raw logs are still not committed to gh-pages β€” only the small pre-rendered PNGs in plots/ are. 
--- .github/workflows/publish-results.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index 44558fdf1fca..b583388ba54e 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -50,6 +50,11 @@ jobs: cp -r "flamegraph-${network}"/* "${network}-results/" 2>/dev/null || true fi + # Copy debug logs into network results directory (needed for plot generation) + if [ -d "debug-logs-${network}" ]; then + cp -r "debug-logs-${network}"/* "${network}-results/" 2>/dev/null || true + fi + # Keep metadata separate for extraction if [ -d "run-metadata-${network}" ]; then mkdir -p "${network}-metadata" From 8e6d857a8633b080da3b18659858cff683fe7796 Mon Sep 17 00:00:00 2001 From: will Date: Wed, 4 Mar 2026 13:50:53 +0000 Subject: [PATCH 36/46] use local bitcoin node to test region network issues --- bench/config.py | 2 +- bench/configs/nightly.toml | 2 +- bench/configs/pr.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bench/config.py b/bench/config.py index 35af8f5c116c..93edac6e823c 100644 --- a/bench/config.py +++ b/bench/config.py @@ -40,7 +40,7 @@ "ci": { "stop_height": 855000, "runs": 3, - "connect": "148.251.128.115:33333", + "connect": "127.0.0.1:38333", }, } diff --git a/bench/configs/nightly.toml b/bench/configs/nightly.toml index c07a7f04e091..74c0a3307296 100644 --- a/bench/configs/nightly.toml +++ b/bench/configs/nightly.toml @@ -13,7 +13,7 @@ runs = 2 [bitcoind] stopatheight = 900000 chain = "main" -connect = "148.251.128.115:33333" # accepts whitelisted ip addrs only +connect = "127.0.0.1:38333" prune = 1000000 daemon = false printtoconsole = false diff --git a/bench/configs/pr.toml b/bench/configs/pr.toml index 3513deba34ec..38a56f4bd06e 100644 --- a/bench/configs/pr.toml +++ b/bench/configs/pr.toml @@ -13,7 +13,7 @@ runs = 2 [bitcoind] stopatheight = 900000 chain = "main" -connect = "148.251.128.115:33333" 
+connect = "127.0.0.1:38333" prune = 1000000 daemon = false printtoconsole = false From 23f01a2e809149ed5627da6c7f4706136e326a30 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 5 Mar 2026 21:58:20 +0000 Subject: [PATCH 37/46] pin nightly benchmark job order: 450 before 32000 Replace the matrix strategy with explicit sequential jobs so the 450 benchmark always runs first in a consistent cache state, and the 32000 benchmark always runs second. --- .github/workflows/nightly-benchmark.yml | 59 +++++++++++++++++++++---- 1 file changed, 50 insertions(+), 9 deletions(-) diff --git a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml index ea88c2ebde0c..d8d4ca821f94 100644 --- a/.github/workflows/nightly-benchmark.yml +++ b/.github/workflows/nightly-benchmark.yml @@ -71,12 +71,8 @@ jobs: name: machine-specs path: ${{ runner.temp }}/machine-specs.json - benchmark: + benchmark-450: needs: build - strategy: - matrix: - # Matrix entries from configs/nightly.toml: dbcache=[450,32000] - name: ["450", "32000"] runs-on: [self-hosted, linux, x64] timeout-minutes: 600 env: @@ -101,7 +97,7 @@ jobs: run: | nix develop --command python3 bench.py run \ --benchmark-config bench/configs/nightly.toml \ - --matrix-entry ${{ matrix.name }} \ + --matrix-entry 450 \ --datadir $ORIGINAL_DATADIR \ --tmp-datadir ${{ runner.temp }}/datadir \ --output-dir ${{ runner.temp }}/output \ @@ -110,18 +106,63 @@ jobs: - name: Upload results uses: actions/upload-artifact@v4 with: - name: result-nightly-${{ matrix.name }} + name: result-nightly-450 + path: ${{ runner.temp }}/output/results.json + + - name: Upload debug logs + uses: actions/upload-artifact@v4 + with: + name: debug-logs-nightly-450 + path: ${{ runner.temp }}/output/*-debug.log + if-no-files-found: ignore + + benchmark-32000: + needs: benchmark-450 + runs-on: [self-hosted, linux, x64] + timeout-minutes: 600 + env: + ORIGINAL_DATADIR: /data/pruned-840k + steps: + - name: Checkout repo + uses: actions/checkout@v4 + 
with: + fetch-depth: 1 + + - name: Download binaries + uses: actions/download-artifact@v4 + with: + name: nightly-binaries + path: ${{ runner.temp }}/binaries + + - name: Set binary permissions + run: | + chmod +x ${{ runner.temp }}/binaries/master/bitcoind + + - name: Run benchmark + run: | + nix develop --command python3 bench.py run \ + --benchmark-config bench/configs/nightly.toml \ + --matrix-entry 32000 \ + --datadir $ORIGINAL_DATADIR \ + --tmp-datadir ${{ runner.temp }}/datadir \ + --output-dir ${{ runner.temp }}/output \ + master:${{ runner.temp }}/binaries/master/bitcoind + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: result-nightly-32000 path: ${{ runner.temp }}/output/results.json - name: Upload debug logs uses: actions/upload-artifact@v4 with: - name: debug-logs-nightly-${{ matrix.name }} + name: debug-logs-nightly-32000 path: ${{ runner.temp }}/output/*-debug.log if-no-files-found: ignore publish: - needs: benchmark + needs: benchmark-32000 runs-on: ubuntu-latest permissions: contents: write From 45be58005747567f71177a838e6d3aa3ec38ec88 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 5 Mar 2026 22:00:21 +0000 Subject: [PATCH 38/46] run fstrim before each benchmark for consistent SSD performance Weekly fstrim.timer on the runner caused a ~25% speedup every Monday (Sunday night run). Running fstrim in the prepare script before each benchmark ensures consistent write performance regardless of when the system timer last ran. Follows the same suid wrapper pattern as drop-caches. 
--- bench/benchmark.py | 4 ++++ bench/capabilities.py | 26 ++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/bench/benchmark.py b/bench/benchmark.py index f6e8b696a666..ef7e151badbe 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -216,6 +216,10 @@ def _create_setup_script(self, tmp_datadir: Path) -> Path: f'mkdir -p "{tmp_datadir}"', f'rm -rf "{tmp_datadir}"/*', ] + + # TRIM SSD once before benchmarking for consistent write performance + if self.capabilities.can_fstrim: + commands.append(f'{self.capabilities.fstrim_path} "{tmp_datadir}"') return self._create_temp_script(commands, "setup") def _create_prepare_script( diff --git a/bench/capabilities.py b/bench/capabilities.py index e922e213b432..5cb67bb98b3b 100644 --- a/bench/capabilities.py +++ b/bench/capabilities.py @@ -18,6 +18,12 @@ "/usr/local/bin/drop-caches", ] +# Known paths for fstrim wrapper +FSTRIM_PATHS = [ + "/run/wrappers/bin/fstrim", + "/usr/local/bin/fstrim", +] + @dataclass class Capabilities: @@ -27,6 +33,10 @@ class Capabilities: can_drop_caches: bool drop_caches_path: str | None + # Disk TRIM + can_fstrim: bool + fstrim_path: str | None + # Required tools has_hyperfine: bool has_flamegraph: bool @@ -88,6 +98,11 @@ def get_warnings(self) -> list[str]: "drop-caches not available - cache won't be cleared between runs" ) + if not self.can_fstrim: + warnings.append( + "fstrim not available - SSD TRIM won't run before benchmarks" + ) + return warnings @@ -104,6 +119,14 @@ def _find_drop_caches() -> str | None: return None +def _find_fstrim() -> str | None: + """Find fstrim executable.""" + for path in FSTRIM_PATHS: + if Path(path).exists() and os.access(path, os.X_OK): + return path + return None + + def _is_nixos() -> bool: """Check if we're running on NixOS.""" return Path("/etc/NIXOS").exists() @@ -112,10 +135,13 @@ def _is_nixos() -> bool: def detect_capabilities() -> Capabilities: """Auto-detect system capabilities.""" drop_caches_path = _find_drop_caches() + 
fstrim_path = _find_fstrim() return Capabilities( can_drop_caches=drop_caches_path is not None, drop_caches_path=drop_caches_path, + can_fstrim=fstrim_path is not None, + fstrim_path=fstrim_path, has_hyperfine=_check_executable("hyperfine"), has_flamegraph=_check_executable("flamegraph"), has_perf=_check_executable("perf"), From 3407df0a62f775b35b7e08906eef3238d61d2009 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 5 Mar 2026 22:03:54 +0000 Subject: [PATCH 39/46] Revert "use local bitcoin node to test region network issues" This reverts commit fd8bdf64ec654c3dbc8677a2bfffa005cbac5ed0. --- bench/config.py | 2 +- bench/configs/nightly.toml | 2 +- bench/configs/pr.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bench/config.py b/bench/config.py index 93edac6e823c..35af8f5c116c 100644 --- a/bench/config.py +++ b/bench/config.py @@ -40,7 +40,7 @@ "ci": { "stop_height": 855000, "runs": 3, - "connect": "127.0.0.1:38333", + "connect": "148.251.128.115:33333", }, } diff --git a/bench/configs/nightly.toml b/bench/configs/nightly.toml index 74c0a3307296..c07a7f04e091 100644 --- a/bench/configs/nightly.toml +++ b/bench/configs/nightly.toml @@ -13,7 +13,7 @@ runs = 2 [bitcoind] stopatheight = 900000 chain = "main" -connect = "127.0.0.1:38333" +connect = "148.251.128.115:33333" # accepts whitelisted ip addrs only prune = 1000000 daemon = false printtoconsole = false diff --git a/bench/configs/pr.toml b/bench/configs/pr.toml index 38a56f4bd06e..3513deba34ec 100644 --- a/bench/configs/pr.toml +++ b/bench/configs/pr.toml @@ -13,7 +13,7 @@ runs = 2 [bitcoind] stopatheight = 900000 chain = "main" -connect = "127.0.0.1:38333" +connect = "148.251.128.115:33333" prune = 1000000 daemon = false printtoconsole = false From c856fd6186150926cac4e5bc7920a6532a8e65b6 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 5 Mar 2026 22:17:53 +0000 Subject: [PATCH 40/46] fstrim the mount point, not a subdirectory FITRIM ioctl requires the filesystem mount point. 
Resolve it from the tmp_datadir path by walking up to the mount boundary. --- bench/benchmark.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/bench/benchmark.py b/bench/benchmark.py index ef7e151badbe..ad89c72a4634 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -22,6 +22,14 @@ logger = logging.getLogger(__name__) +def _find_mount_point(path: Path) -> Path: + """Walk up from path to find its mount point.""" + path = path.resolve() + while not path.is_mount(): + path = path.parent + return path + + @dataclass class BenchmarkResult: """Result of the benchmark phase.""" @@ -219,7 +227,8 @@ def _create_setup_script(self, tmp_datadir: Path) -> Path: # TRIM SSD once before benchmarking for consistent write performance if self.capabilities.can_fstrim: - commands.append(f'{self.capabilities.fstrim_path} "{tmp_datadir}"') + mount = _find_mount_point(tmp_datadir) + commands.append(f'{self.capabilities.fstrim_path} "{mount}"') return self._create_temp_script(commands, "setup") def _create_prepare_script( From 8750a7ba815b8db883820c8c049ee26b186f91ac Mon Sep 17 00:00:00 2001 From: will Date: Fri, 6 Mar 2026 22:06:20 +0000 Subject: [PATCH 41/46] show manual nightly re-runs as scatter points on chart Manual (workflow_dispatch) runs are now stored separately from scheduled nightly runs. Scheduled runs still dedup by (date, commit, dbcache) to handle retries. Manual runs always append, appearing as diamond markers on the chart alongside the nightly trend line. Also ruff format. 
--- .github/workflows/nightly-benchmark.yml | 10 +++- bench.py | 8 ++++ bench/nightly.py | 47 +++++++++++++------ bench/report.py | 7 ++- bench/templates/nightly-chart.html | 61 ++++++++++++++++--------- 5 files changed, 96 insertions(+), 37 deletions(-) diff --git a/.github/workflows/nightly-benchmark.yml b/.github/workflows/nightly-benchmark.yml index d8d4ca821f94..7ee1930463f4 100644 --- a/.github/workflows/nightly-benchmark.yml +++ b/.github/workflows/nightly-benchmark.yml @@ -205,13 +205,19 @@ jobs: - name: Install Nix uses: cachix/install-nix-action@v31 - - name: Get dates + - name: Get dates and trigger run: | # Commit date (for chart X-axis) COMMIT_DATE=$(cat ./commit-info/commit-date.txt) echo "COMMIT_DATE=$COMMIT_DATE" >> "$GITHUB_ENV" # Run date (for reference) echo "RUN_DATE=$(date -u +%Y-%m-%d)" >> "$GITHUB_ENV" + # Trigger type (scheduled nightly vs manual dispatch) + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + echo "TRIGGER=manual" >> "$GITHUB_ENV" + else + echo "TRIGGER=scheduled" >> "$GITHUB_ENV" + fi - name: Append results to history run: | @@ -226,6 +232,7 @@ jobs: 450 \ --date "$COMMIT_DATE" \ --run-date "$RUN_DATE" \ + --trigger "$TRIGGER" \ --benchmark-config benchcoin-tools/bench/configs/nightly.toml \ --machine-specs ./machine-specs/machine-specs.json @@ -238,6 +245,7 @@ jobs: 32000 \ --date "$COMMIT_DATE" \ --run-date "$RUN_DATE" \ + --trigger "$TRIGGER" \ --benchmark-config benchcoin-tools/bench/configs/nightly.toml \ --machine-specs ./machine-specs/machine-specs.json diff --git a/bench.py b/bench.py index 6dc371a13d27..d696b115893a 100755 --- a/bench.py +++ b/bench.py @@ -327,6 +327,7 @@ def cmd_nightly(args: argparse.Namespace) -> int: instrumentation=args.instrumentation, machine_specs_file=machine_specs_file, run_date=args.run_date or "", + trigger=args.trigger, ) logger.info(f"Appended result to {history_file}") elif args.nightly_command == "chart": @@ -575,6 +576,13 @@ def main() -> int: metavar="YYYY-MM-DD", 
help="Date when benchmark was executed (default: today). Stored for reference.", ) + nightly_append.add_argument( + "--trigger", + default="scheduled", + choices=["scheduled", "manual"], + help="How the benchmark was triggered (default: scheduled). " + "Scheduled runs dedup by commit; manual runs are always kept.", + ) # nightly chart nightly_chart = nightly_subparsers.add_parser( diff --git a/bench/nightly.py b/bench/nightly.py index 9da3645dd0a9..e1e07141aeda 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -134,7 +134,9 @@ def series_label(result: "NightlyResult") -> str: prune = bitcoind.get("prune", 0) prune_str = f", prune {prune}" if prune else "" - return f"{arch}, {cpu_short}, {ram_str}, {block_range}, dbcache {dbcache}{prune_str}" + return ( + f"{arch}, {cpu_short}, {ram_str}, {block_range}, dbcache {dbcache}{prune_str}" + ) @dataclass @@ -151,6 +153,9 @@ class NightlyResult: ] # Full benchmark config (dbcache inside config.bitcoind.dbcache) machine: dict[str, Any] # Full machine specs run_date: str = "" # When benchmark was executed (reference only) + trigger: str = ( + "scheduled" # "scheduled" (nightly cron) or "manual" (workflow_dispatch) + ) @property def dbcache(self) -> int: @@ -183,6 +188,8 @@ def to_dict(self) -> dict[str, Any]: } if self.run_date: result["run_date"] = self.run_date + if self.trigger != "scheduled": + result["trigger"] = self.trigger return result @classmethod @@ -202,6 +209,7 @@ def from_dict(cls, data: dict[str, Any]) -> NightlyResult: config=data["config"], machine=data.get("machine", {}), run_date=data.get("run_date", ""), + trigger=data.get("trigger", "scheduled"), ) # Legacy format - convert to new format @@ -252,19 +260,24 @@ def save(self) -> None: logger.info(f"Saved {len(self.results)} results to {self.history_file}") def append(self, result: NightlyResult) -> None: - """Append a new result to history.""" - # Check for duplicate (same date, commit, dbcache) - for existing in self.results: - if ( - existing.date 
== result.date - and existing.commit == result.commit - and existing.dbcache == result.dbcache - ): - logger.warning( - f"Duplicate result for {result.date} {result.commit[:8]} dbcache={result.dbcache}, replacing" - ) - self.results.remove(existing) - break + """Append a new result to history. + + Scheduled runs dedup by (date, commit, dbcache) to handle retries. + Manual runs are always appended as additional data points. + """ + if result.trigger == "scheduled": + for existing in self.results: + if ( + existing.trigger == "scheduled" + and existing.date == result.date + and existing.commit == result.commit + and existing.dbcache == result.dbcache + ): + logger.warning( + f"Replacing scheduled result for {result.date} {result.commit[:8]} dbcache={result.dbcache}" + ) + self.results.remove(existing) + break self.results.append(result) # Sort by date, then dbcache @@ -314,6 +327,8 @@ def get_chart_data(self) -> list[dict]: "series_key": key, "series_label": series_label(r), "color_index": series_color_index(key), + "trigger": r.trigger, + "run_date": r.run_date, } ) return chart_data @@ -326,6 +341,7 @@ def append_from_results_json( machine_specs: dict[str, Any], date_str: str | None = None, run_date: str = "", + trigger: str = "scheduled", ) -> None: """Append result from a hyperfine results.json file. @@ -367,6 +383,7 @@ def append_from_results_json( config=benchmark_config, machine=machine_specs, run_date=run_date, + trigger=trigger, ) self.append(result) @@ -404,6 +421,7 @@ def append( instrumentation: str = "uninstrumented", machine_specs_file: Path | None = None, run_date: str = "", + trigger: str = "scheduled", ) -> None: """Append a result from hyperfine results.json to history. 
@@ -450,6 +468,7 @@ def append( machine_specs=machine_specs, date_str=date_str, run_date=run_date, + trigger=trigger, ) history.save() diff --git a/bench/report.py b/bench/report.py index 02c28846a4cb..2e680d6f87f6 100644 --- a/bench/report.py +++ b/bench/report.py @@ -193,7 +193,12 @@ def generate_multi_network( # Generate HTML html = self._generate_html( - all_runs, nightly_comparison, full_title, output_dir, output_dir, commit, + all_runs, + nightly_comparison, + full_title, + output_dir, + output_dir, + commit, run_id, ) diff --git a/bench/templates/nightly-chart.html b/bench/templates/nightly-chart.html index c3732fab14b6..146d9cea3949 100644 --- a/bench/templates/nightly-chart.html +++ b/bench/templates/nightly-chart.html @@ -103,10 +103,12 @@

    Bitcoin Core Nightly IBD Benchmark

    seriesMap.set(key, { label: d.series_label || (d.config + ' dbcache'), colorIndex: d.color_index || 0, - points: [] + scheduled: [], + manual: [] }); } - seriesMap.get(key).points.push(d); + const bucket = (d.trigger === 'manual') ? 'manual' : 'scheduled'; + seriesMap.get(key)[bucket].push(d); }); const traces = []; @@ -114,27 +116,44 @@

    Bitcoin Core Nightly IBD Benchmark

    .sort((a, b) => a[1].label.localeCompare(b[1].label)); sortedSeries.forEach(([seriesKey, series]) => { - const points = series.points.sort((a, b) => a.date.localeCompare(b.date)); const color = getSeriesColor(series.colorIndex); - traces.push({ - name: series.label, - x: points.map(d => d.date), - y: points.map(d => toMinutes(d.mean)), - text: points.map(d => d.commit.slice(0, 8)), - customdata: points.map(d => [d.commit, toMinutes(d.stddev || 0)]), - hovertemplate: '%{text}
    %{y:.1f} min
    \u00b1%{customdata[1]:.1f} min' + series.label + '', - mode: 'lines+markers', - line: { color: color, width: 2 }, - marker: { size: 8 }, - error_y: { - type: 'data', - array: points.map(d => toMinutes(d.stddev || 0)), - visible: true, - color: getErrorColor(color), - thickness: 1.5 - } - }); + if (series.scheduled.length > 0) { + const points = series.scheduled.sort((a, b) => a.date.localeCompare(b.date)); + traces.push({ + name: series.label, + x: points.map(d => d.date), + y: points.map(d => toMinutes(d.mean)), + text: points.map(d => d.commit.slice(0, 8)), + customdata: points.map(d => [d.commit, toMinutes(d.stddev || 0)]), + hovertemplate: '%{text}
    %{y:.1f} min
    \u00b1%{customdata[1]:.1f} min' + series.label + '', + mode: 'lines+markers', + line: { color: color, width: 2 }, + marker: { size: 8 }, + error_y: { + type: 'data', + array: points.map(d => toMinutes(d.stddev || 0)), + visible: true, + color: getErrorColor(color), + thickness: 1.5 + } + }); + } + + if (series.manual.length > 0) { + const points = series.manual.sort((a, b) => a.date.localeCompare(b.date)); + traces.push({ + name: series.label + ' (manual)', + x: points.map(d => d.run_date || d.date), + y: points.map(d => toMinutes(d.mean)), + text: points.map(d => d.commit.slice(0, 8)), + customdata: points.map(d => [d.commit, d.run_date || d.date]), + hovertemplate: '%{text}
    %{y:.1f} min
    run: %{customdata[1]}' + series.label + ' (manual)', + mode: 'markers', + marker: { size: 10, symbol: 'diamond', color: color, line: { color: 'white', width: 1 } }, + legendgroup: seriesKey + }); + } }); return traces; From ef49957f8e08068af5dc4a100d0892387b55544c Mon Sep 17 00:00:00 2001 From: will Date: Fri, 13 Mar 2026 10:59:06 +0000 Subject: [PATCH 42/46] Compare PR benchmarks against median of last 7 nightly runs --- .github/workflows/publish-results.yml | 4 +-- bench/nightly.py | 35 ++++++++++++++++++++++++ bench/report.py | 39 +++++++++++++++------------ bench/templates/pr-report.html | 5 ++-- 4 files changed, 61 insertions(+), 22 deletions(-) diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index b583388ba54e..4cbb44cc5b36 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -114,7 +114,7 @@ jobs: .nightly_comparison | to_entries | map( "\(.key) MB: \(.value.pr_mean / 60 | floor) min" + if .value.nightly_mean then - " (nightly: \(.value.nightly_mean / 60 | floor) min, \(.value.nightly_date)) β†’ " + + " (nightly median of \(.value.nightly_count): \(.value.nightly_mean / 60 | floor) min, \(.value.nightly_date_range)) β†’ " + if .value.speedup_percent > 0 then "+\(.value.speedup_percent)% faster" elif .value.speedup_percent < 0 then "\(.value.speedup_percent)% slower" else "same" @@ -188,7 +188,7 @@ jobs: --repo ${{ github.repository }} \ --body "## Benchmark Results - **Comparison to nightly master:** + **Comparison to nightly master (median of last 7 runs):** - ${{ needs.build.outputs.comparison }} [View detailed results](${{ needs.build.outputs.result-url }}) diff --git a/bench/nightly.py b/bench/nightly.py index e1e07141aeda..d8615a00fb4f 100644 --- a/bench/nightly.py +++ b/bench/nightly.py @@ -308,6 +308,41 @@ def get_latest(self, dbcache: int | str) -> NightlyResult | None: # Results are sorted by date, so last one is most recent return matching[-1] + def 
get_recent_median( + self, dbcache: int | str, n: int = 7 + ) -> tuple[float, list[NightlyResult]] | None: + """Get the median mean of the most recent N scheduled results for a dbcache config. + + Args: + dbcache: DB cache size in MB (int) or config name like '450', '32000' + n: Number of recent results to average + + Returns: + Tuple of (median_mean, list_of_results_used), or None if no results found + """ + if isinstance(dbcache, str): + try: + dbcache = int(dbcache) + except ValueError: + return None + + matching = [ + r + for r in self.results + if r.dbcache == dbcache and r.trigger == "scheduled" + ] + if not matching: + return None + + recent = matching[-n:] + sorted_means = sorted(r.mean for r in recent) + mid = len(sorted_means) // 2 + if len(sorted_means) % 2 == 0: + median = (sorted_means[mid - 1] + sorted_means[mid]) / 2 + else: + median = sorted_means[mid] + return median, recent + def get_chart_data(self) -> list[dict]: """Get results in format suitable for chart embedding. diff --git a/bench/report.py b/bench/report.py index 2e680d6f87f6..e032c48a94e5 100644 --- a/bench/report.py +++ b/bench/report.py @@ -346,8 +346,8 @@ def _calculate_nightly_comparison( ) -> dict[str, dict[str, Any]]: """Calculate comparison against nightly baseline. - Compares PR results against the most recent nightly results for each config. - Only considers uninstrumented configs (those without '-true' suffix). + Compares PR results against the median of the most recent 7 nightly results + for each config. Only considers uninstrumented configs. 
Args: runs: List of benchmark runs @@ -360,8 +360,8 @@ def _calculate_nightly_comparison( "pr_mean": 14500.0, "pr_stddev": 100.0, "nightly_mean": 14800.0, - "nightly_date": "2026-01-05", - "nightly_commit": "abc123...", + "nightly_count": 7, + "nightly_date_range": "2026-01-01 to 2026-01-07", "speedup_percent": 2.0 } } @@ -387,25 +387,30 @@ def _calculate_nightly_comparison( pr_mean = run.mean pr_stddev = run.stddev - # Get latest nightly for this config - nightly = self.nightly_history.get_latest(config) + # Get median of recent nightly results for this config + result = self.nightly_history.get_recent_median(config, n=7) - if nightly: + if result: + nightly_median, recent_results = result speedup = None - if nightly.mean > 0: - speedup = round(((nightly.mean - pr_mean) / nightly.mean) * 100, 1) + if nightly_median > 0: + speedup = round( + ((nightly_median - pr_mean) / nightly_median) * 100, 1 + ) + + # Use the latest result for series key/label and chart positioning + latest = recent_results[-1] comparison[config] = { "pr_mean": pr_mean, "pr_stddev": pr_stddev, "pr_commit": commit, - "nightly_mean": nightly.mean, - "nightly_stddev": nightly.stddev, - "nightly_date": nightly.date, - "nightly_commit": nightly.commit, + "nightly_mean": nightly_median, + "nightly_count": len(recent_results), + "nightly_date_range": f"{recent_results[0].date} to {recent_results[-1].date}", "speedup_percent": speedup, - "series_key": series_key(nightly), - "series_label": series_label(nightly), + "series_key": series_key(latest), + "series_label": series_label(latest), } else: # No nightly data, just record PR result @@ -414,8 +419,8 @@ def _calculate_nightly_comparison( "pr_stddev": pr_stddev, "pr_commit": commit, "nightly_mean": None, - "nightly_date": None, - "nightly_commit": None, + "nightly_count": 0, + "nightly_date_range": None, "speedup_percent": None, } diff --git a/bench/templates/pr-report.html b/bench/templates/pr-report.html index 9ebfecbc2f34..3e968325406b 100644 --- 
a/bench/templates/pr-report.html +++ b/bench/templates/pr-report.html @@ -42,7 +42,7 @@

    Comparison to Nightly Master

    Config PR Time - Nightly Time (Date) + Nightly Median (Date Range) Change @@ -54,8 +54,7 @@

    Comparison to Nightly Master

    {% if data.nightly_mean %} {{ "%.1f"|format(data.nightly_mean / 60) }} min - ({{ data.nightly_date }}{% if data.nightly_commit %}, - {{ data.nightly_commit[:7] }}{% endif %}) + (median of {{ data.nightly_count }}, {{ data.nightly_date_range }}) {% else %} No baseline {% endif %} From d9b482816ff07cc8e0a120fee3d1bde5a82c9f60 Mon Sep 17 00:00:00 2001 From: will Date: Fri, 13 Mar 2026 20:23:39 +0000 Subject: [PATCH 43/46] merge manual nightly runs into their series on the chart Manual (workflow_dispatch) runs no longer get a separate "(manual)" legend entry with diamond markers. They appear as regular points in the same series trace as scheduled runs. --- bench/templates/nightly-chart.html | 63 +++++++++++------------------- 1 file changed, 22 insertions(+), 41 deletions(-) diff --git a/bench/templates/nightly-chart.html b/bench/templates/nightly-chart.html index 146d9cea3949..0ebf835bd61a 100644 --- a/bench/templates/nightly-chart.html +++ b/bench/templates/nightly-chart.html @@ -103,12 +103,10 @@

    Bitcoin Core Nightly IBD Benchmark

    seriesMap.set(key, { label: d.series_label || (d.config + ' dbcache'), colorIndex: d.color_index || 0, - scheduled: [], - manual: [] + points: [] }); } - const bucket = (d.trigger === 'manual') ? 'manual' : 'scheduled'; - seriesMap.get(key)[bucket].push(d); + seriesMap.get(key).points.push(d); }); const traces = []; @@ -117,43 +115,26 @@

    Bitcoin Core Nightly IBD Benchmark

    sortedSeries.forEach(([seriesKey, series]) => { const color = getSeriesColor(series.colorIndex); - - if (series.scheduled.length > 0) { - const points = series.scheduled.sort((a, b) => a.date.localeCompare(b.date)); - traces.push({ - name: series.label, - x: points.map(d => d.date), - y: points.map(d => toMinutes(d.mean)), - text: points.map(d => d.commit.slice(0, 8)), - customdata: points.map(d => [d.commit, toMinutes(d.stddev || 0)]), - hovertemplate: '%{text}
    %{y:.1f} min
    \u00b1%{customdata[1]:.1f} min' + series.label + '', - mode: 'lines+markers', - line: { color: color, width: 2 }, - marker: { size: 8 }, - error_y: { - type: 'data', - array: points.map(d => toMinutes(d.stddev || 0)), - visible: true, - color: getErrorColor(color), - thickness: 1.5 - } - }); - } - - if (series.manual.length > 0) { - const points = series.manual.sort((a, b) => a.date.localeCompare(b.date)); - traces.push({ - name: series.label + ' (manual)', - x: points.map(d => d.run_date || d.date), - y: points.map(d => toMinutes(d.mean)), - text: points.map(d => d.commit.slice(0, 8)), - customdata: points.map(d => [d.commit, d.run_date || d.date]), - hovertemplate: '%{text}
    %{y:.1f} min
    run: %{customdata[1]}' + series.label + ' (manual)', - mode: 'markers', - marker: { size: 10, symbol: 'diamond', color: color, line: { color: 'white', width: 1 } }, - legendgroup: seriesKey - }); - } + const points = series.points.sort((a, b) => a.date.localeCompare(b.date)); + + traces.push({ + name: series.label, + x: points.map(d => d.date), + y: points.map(d => toMinutes(d.mean)), + text: points.map(d => d.commit.slice(0, 8)), + customdata: points.map(d => [d.commit, toMinutes(d.stddev || 0)]), + hovertemplate: '%{text}
    %{y:.1f} min
    \u00b1%{customdata[1]:.1f} min' + series.label + '', + mode: 'lines+markers', + line: { color: color, width: 2 }, + marker: { size: 8 }, + error_y: { + type: 'data', + array: points.map(d => toMinutes(d.stddev || 0)), + visible: true, + color: getErrorColor(color), + thickness: 1.5 + } + }); }); return traces; From 52bcc0e68973bcdf126bc38e07c6b597d7e01198 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 17 Mar 2026 13:47:13 +0000 Subject: [PATCH 44/46] Add assumevalid=0 benchmark runs to PR workflow Adds a separate benchmark job (benchmark-noav) that runs IBD with -assumevalid=0 to measure full script verification performance. Uses a dedicated TOML config with uninstrumented-only matrix, and prefixes artifacts with noav- so the publish workflow can handle them alongside existing runs. --- .github/workflows/benchmark.yml | 70 +++++++++++++++++++++++++++ .github/workflows/publish-results.yml | 4 +- bench/configs/pr-noassumevalid.toml | 26 ++++++++++ bench/report.py | 42 ++++++++++------ 4 files changed, 125 insertions(+), 17 deletions(-) create mode 100644 bench/configs/pr-noassumevalid.toml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 730ab434ee14..f75fcafa2b90 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -97,3 +97,73 @@ jobs: with: name: run-metadata-${{ matrix.name }} path: ${{ runner.temp }}/contexts/ + + benchmark-noav: + needs: build-binary + strategy: + matrix: + name: [450-uninstrumented, 32000-uninstrumented] + runs-on: [self-hosted, linux, x64] + timeout-minutes: 600 + env: + ORIGINAL_DATADIR: /data/pruned-840k + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Download binaries + uses: actions/download-artifact@v4 + with: + name: bitcoind-binaries + path: ${{ runner.temp }}/binaries + + - name: Set binary permissions + run: | + chmod +x ${{ runner.temp }}/binaries/pr/bitcoind + + - name: Run benchmark + run: | + nix develop 
--command python3 bench.py run \ + --benchmark-config bench/configs/pr-noassumevalid.toml \ + --matrix-entry ${{ matrix.name }} \ + --datadir $ORIGINAL_DATADIR \ + --tmp-datadir ${{ runner.temp }}/datadir \ + --output-dir ${{ runner.temp }}/output \ + pr:${{ runner.temp }}/binaries/pr/bitcoind + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: result-noav-${{ matrix.name }} + path: ${{ runner.temp }}/output/results.json + + - name: Upload flamegraphs + uses: actions/upload-artifact@v4 + with: + name: flamegraph-noav-${{ matrix.name }} + path: ${{ runner.temp }}/output/*-flamegraph.svg + if-no-files-found: ignore + + - name: Upload debug logs + uses: actions/upload-artifact@v4 + with: + name: debug-logs-noav-${{ matrix.name }} + path: ${{ runner.temp }}/output/*-debug.log + if-no-files-found: ignore + + - name: Write context metadata + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + RUNNER_CONTEXT: ${{ toJSON(runner) }} + run: | + mkdir -p ${{ runner.temp }}/contexts + echo "$GITHUB_CONTEXT" | nix develop --command jq "del(.token)" > ${{ runner.temp }}/contexts/github.json + echo "$RUNNER_CONTEXT" > ${{ runner.temp }}/contexts/runner.json + + - name: Upload context metadata + uses: actions/upload-artifact@v4 + with: + name: run-metadata-noav-${{ matrix.name }} + path: ${{ runner.temp }}/contexts/ diff --git a/.github/workflows/publish-results.yml b/.github/workflows/publish-results.yml index 4cbb44cc5b36..27a619527df1 100644 --- a/.github/workflows/publish-results.yml +++ b/.github/workflows/publish-results.yml @@ -12,8 +12,8 @@ jobs: contents: write checks: read env: - # Matrix entries from configs/pr.toml: dbcache=[450,32000] x instrumentation=[uninstrumented,instrumented] - NETWORKS: "450-uninstrumented,450-instrumented,32000-uninstrumented,32000-instrumented" + # Matrix entries from configs/pr.toml + pr-noassumevalid.toml + NETWORKS: 
"450-uninstrumented,450-instrumented,32000-uninstrumented,32000-instrumented,noav-450-uninstrumented,noav-32000-uninstrumented" outputs: comparison: ${{ steps.generate.outputs.comparison }} pr-number: ${{ steps.metadata.outputs.pr-number }} diff --git a/bench/configs/pr-noassumevalid.toml b/bench/configs/pr-noassumevalid.toml new file mode 100644 index 000000000000..aac45ca4954e --- /dev/null +++ b/bench/configs/pr-noassumevalid.toml @@ -0,0 +1,26 @@ +# PR benchmark configuration with assumevalid=0 (full script verification) +# Measures validation/script verification performance ("what it's like at tip") +# +# Usage: +# bench.py run --benchmark-config bench/configs/pr-noassumevalid.toml --matrix-entry 450-uninstrumented \ +# --datadir /data/pruned-840k --output-dir ./output \ +# pr:/path/to/pr/bitcoind + +[benchmark] +start_height = 840000 +runs = 2 + +[bitcoind] +stopatheight = 900000 +chain = "main" +connect = "148.251.128.115:33333" +prune = 1000000 +daemon = false +printtoconsole = false +assumevalid = "0" + +# Parameter matrix - creates multiple benchmark configurations +# Matrix expands to: 450-uninstrumented, 32000-uninstrumented +[bitcoind.matrix] +dbcache = [450, 32000] +instrumentation = ["uninstrumented"] diff --git a/bench/report.py b/bench/report.py index e032c48a94e5..da517e801057 100644 --- a/bench/report.py +++ b/bench/report.py @@ -29,6 +29,7 @@ def format_config_display( dbcache: int, machine_id: str | None = None, instrumentation: str | None = None, + noassumevalid: bool = False, ) -> str: """Format config for display. 
@@ -36,6 +37,7 @@ def format_config_display( dbcache: DB cache size in MB machine_id: Machine ID (e.g., "amd64", "arm64") instrumentation: Instrumentation mode (e.g., "instrumented", "uninstrumented") + noassumevalid: Whether assumevalid=0 was used Returns: Display string like "dbcache=450MB (amd64, instrumented)" @@ -45,8 +47,8 @@ def format_config_display( 'dbcache=450MB' >>> format_config_display(32000, "amd64") 'dbcache=32GB (amd64)' - >>> format_config_display(450, "arm64", "instrumented") - 'dbcache=450MB (arm64, instrumented)' + >>> format_config_display(450, noassumevalid=True) + 'dbcache=450MB (assumevalid=0)' """ # Format dbcache with unit if dbcache >= 1000: @@ -60,29 +62,35 @@ def format_config_display( parts.append(machine_id) if instrumentation and instrumentation != "uninstrumented": parts.append(instrumentation) + if noassumevalid: + parts.append("assumevalid=0") if parts: return f"dbcache={cache_str} ({', '.join(parts)})" return f"dbcache={cache_str}" -def parse_network_name(network: str) -> tuple[int, str]: - """Parse a network/config name to extract dbcache and instrumentation. +def parse_network_name(network: str) -> tuple[int, str, bool]: + """Parse a network/config name to extract dbcache, instrumentation, and noassumevalid. 
Args: - network: Network name like "450-uninstrumented", "32000-instrumented", "450", "32000" + network: Network name like "450-uninstrumented", "noav-32000-uninstrumented", "450" Returns: - Tuple of (dbcache_int, instrumentation_str) + Tuple of (dbcache_int, instrumentation_str, noassumevalid_bool) Examples: >>> parse_network_name("450-uninstrumented") - (450, 'uninstrumented') - >>> parse_network_name("32000-instrumented") - (32000, 'instrumented') + (450, 'uninstrumented', False) + >>> parse_network_name("noav-32000-uninstrumented") + (32000, 'uninstrumented', True) >>> parse_network_name("450") - (450, 'uninstrumented') + (450, 'uninstrumented', False) """ + noassumevalid = network.startswith("noav-") + if noassumevalid: + network = network[len("noav-"):] + parts = network.split("-") try: dbcache = int(parts[0]) @@ -90,7 +98,7 @@ def parse_network_name(network: str) -> tuple[int, str]: dbcache = 0 instrumentation = parts[1] if len(parts) > 1 else "uninstrumented" - return dbcache, instrumentation + return dbcache, instrumentation, noassumevalid @dataclass @@ -467,9 +475,9 @@ def _generate_html( runs_data = [] for run in sorted_runs: - dbcache, instrumentation = parse_network_name(run.network) + dbcache, instrumentation, noassumevalid = parse_network_name(run.network) config_display = format_config_display( - dbcache, instrumentation=instrumentation + dbcache, instrumentation=instrumentation, noassumevalid=noassumevalid ) runs_data.append( { @@ -522,14 +530,18 @@ def _prepare_nightly_data( pr_chart_data = [] for config, data in sorted(nightly_comparison.items()): + noassumevalid = config.startswith("noav-") + raw = config[len("noav-"):] if noassumevalid else config try: - dbcache = int(config) + dbcache = int(raw) except ValueError: dbcache = 0 result[config] = { **data, - "config_display": format_config_display(dbcache), + "config_display": format_config_display( + dbcache, noassumevalid=noassumevalid + ), } if data.get("nightly_mean"): From 
de32701d93501b2592c243d8f07d52129f712178 Mon Sep 17 00:00:00 2001 From: will Date: Fri, 24 Apr 2026 20:56:57 +0100 Subject: [PATCH 45/46] Add BIP68 fast-path: skip entire BIP68 coin iteration and SequenceLocks when CSV not active (early blocks / regtest) Port the optimization from pi-autoresearch commit 9090b37ca73a2bfe90bb0aaf5c90f05971553442 into this tree, excluding the benchmark metadata file. --- src/validation.cpp | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index f85a834f2a4a..2452568d5dab 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2546,15 +2546,24 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, // Check that transaction is BIP68 final // BIP68 lock checks (as opposed to nLockTime checks) must // be in ConnectBlock because they require the UTXO set - prevheights.resize(tx.vin.size()); - for (size_t j = 0; j < tx.vin.size(); j++) { - prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; - } + if (nLockTimeFlags & LOCKTIME_VERIFY_SEQUENCE) { + prevheights.resize(tx.vin.size()); + std::vector spent_outputs; + spent_outputs.reserve(tx.vin.size()); + for (size_t j = 0; j < tx.vin.size(); j++) { + const Coin& coin = view.AccessCoin(tx.vin[j].prevout); + prevheights[j] = coin.nHeight; + spent_outputs.emplace_back(coin.out); + } - if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { - state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", - "contains a non-BIP68-final transaction " + tx.GetHash().ToString()); - break; + if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { + state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", + "contains a non-BIP68-final transaction " + tx.GetHash().ToString()); + break; + } + // Pre-initialize txdata with spent outputs to avoid redundant + // coin iteration in CheckInputScripts. 
+ txsdata[i].Init(tx, std::move(spent_outputs)); } } From ff7fbfc96f9876b53c4bc7d4fe4d612a0bad20b3 Mon Sep 17 00:00:00 2001 From: will Date: Fri, 24 Apr 2026 20:58:52 +0100 Subject: [PATCH 46/46] Eliminate redundant HaveInputs() call in CheckTxInputs: merge missing-input check into the existing AccessCoin loop Port the optimization from pi-autoresearch commit 6dcb7f2527f8aaf92330936321bd9d21e93cd6e6 into this tree, excluding the benchmark metadata file. This keeps the missing-input handling in the same loop that already reads each coin and always seeds PrecomputedTransactionData in ConnectBlock so CheckInputScripts can reuse the fetched spent outputs even when CSV is inactive. --- src/consensus/tx_verify.cpp | 11 ++++------- src/validation.cpp | 15 ++++++++++----- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index 4efed70fd411..f3c0355563ff 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -163,17 +163,14 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee) { - // are the actual inputs available? 
- if (!inputs.HaveInputs(tx)) { - return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent", - strprintf("%s: inputs missing/spent", __func__)); - } - CAmount nValueIn = 0; for (unsigned int i = 0; i < tx.vin.size(); ++i) { const COutPoint &prevout = tx.vin[i].prevout; const Coin& coin = inputs.AccessCoin(prevout); - assert(!coin.IsSpent()); + if (coin.IsSpent()) { + return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent", + strprintf("%s: inputs missing/spent", __func__)); + } // If prev is coinbase, check that it's matured if (coin.IsCoinBase() && nSpendHeight - coin.nHeight < COINBASE_MATURITY) { diff --git a/src/validation.cpp b/src/validation.cpp index 2452568d5dab..8ae3b0fb24dc 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2546,10 +2546,12 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, // Check that transaction is BIP68 final // BIP68 lock checks (as opposed to nLockTime checks) must // be in ConnectBlock because they require the UTXO set + // Collect spent outputs regardless of BIP68 to avoid redundant + // coin iteration in CheckInputScripts. + std::vector spent_outputs; + spent_outputs.reserve(tx.vin.size()); if (nLockTimeFlags & LOCKTIME_VERIFY_SEQUENCE) { prevheights.resize(tx.vin.size()); - std::vector spent_outputs; - spent_outputs.reserve(tx.vin.size()); for (size_t j = 0; j < tx.vin.size(); j++) { const Coin& coin = view.AccessCoin(tx.vin[j].prevout); prevheights[j] = coin.nHeight; @@ -2561,10 +2563,13 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, "contains a non-BIP68-final transaction " + tx.GetHash().ToString()); break; } - // Pre-initialize txdata with spent outputs to avoid redundant - // coin iteration in CheckInputScripts. 
- txsdata[i].Init(tx, std::move(spent_outputs)); + } else { + for (size_t j = 0; j < tx.vin.size(); j++) { + spent_outputs.emplace_back(view.AccessCoin(tx.vin[j].prevout).out); + } } + + txsdata[i].Init(tx, std::move(spent_outputs)); } // GetTransactionSigOpCost counts 3 types of sigops: