meson: wire up benchmarks

Wire up benchmarks in Meson. The setup is mostly the same as how we wire
up our tests. The only difference is that benchmarks get wired up via
the `benchmark()` function instead of via `test()`, which gives them a
bit of special treatment (sketched below):

  - Benchmarks never run in parallel.

  - Benchmarks aren't run by default when tests are executed.

  - Meson does not inject the `MALLOC_PERTURB_` environment variable.
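
A minimal sketch of that difference (the real wiring, with environment,
dependencies and timeouts, is in the t/meson.build hunk further down;
the names here are abbreviated):

    ```
    # Tests are registered via test() and may run in parallel.
    test('t1400-update-ref', shell,
      args: ['t1400-update-ref.sh'],
    )

    # Benchmarks use benchmark() instead: they run serially, are skipped
    # by a plain `meson test`, and get no MALLOC_PERTURB_ injected.
    benchmark('p0001-rev-list', shell,
      args: ['p0001-rev-list.sh'],
    )
    ```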

Using benchmarks is quite simple:

    ```
    $ meson setup build
    # Run all benchmarks.
    $ meson test -C build --benchmark
    # Run a specific benchmark.
    $ meson test -C build --benchmark p0000-*
    ```

Other than that, the usual command-line arguments accepted when running
tests are also accepted when running benchmarks.
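
For example, assuming the build directory from above, one can list the
wired-up benchmarks or forward options to the underlying script just
like for tests (the flag combinations shown here are illustrative):

    ```
    # List available benchmarks without running them.
    $ meson test -C build --benchmark --list
    # Forward options to the benchmark script, as for regular tests.
    $ meson test -C build --benchmark --test-args='-v' p0000-perf-lib-sanity
    ```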

Note that the benchmarking target is somewhat limited because it will
only run benchmarks for the current build. Other use cases, like running
benchmarks against multiple different versions of Git, are not currently
supported. Users should continue to use "t/perf/run" for those use
cases (see the example below). The script should eventually be extended
to support Meson, but that is outside the scope of this series.
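
As a rough sketch of that existing workflow (the revisions and the perf
script are only examples), "t/perf/run" builds and benchmarks several
revisions in one go:

    ```
    $ cd t/perf
    # Benchmark the given perf script against two specified revisions.
    $ ./run v2.48.0 v2.49.0 -- p0001-rev-list.sh
    ```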

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Author:    Patrick Steinhardt <ps@pks.im>
Date:      2025-04-28 09:30:49 +02:00
Committer: Junio C Hamano <gitster@pobox.com>
Parent:    5756ccd181
Commit:    d84eefaeea
3 changed files with 91 additions and 4 deletions

meson.build

@@ -70,9 +70,15 @@
 # # Execute single test interactively such that features like `debug ()` work.
 # $ meson test -i --test-args='-ix' t1400-update-ref
 #
-# Test execution is parallelized by default and scales with the number of
-# processor cores available. You can change the number of processes by passing
-# the `-jN` flag to `meson test`.
+# # Execute all benchmarks.
+# $ meson test -i --benchmark
+#
+# # Execute single benchmark.
+# $ meson test -i --benchmark p0000-*
+#
+# Test execution (but not benchmark execution) is parallelized by default and
+# scales with the number of processor cores available. You can change the
+# number of processes by passing the `-jN` flag to `meson test`.
 #
 # 4. Install the Git distribution. Again, this can be done via Meson, Ninja or
 # Samurai:
@@ -235,6 +241,7 @@ git = find_program('git', dirs: program_path, native: true, required: false)
 sed = find_program('sed', dirs: program_path, native: true)
 shell = find_program('sh', dirs: program_path, native: true)
 tar = find_program('tar', dirs: program_path, native: true)
+time = find_program('time', dirs: program_path, required: get_option('benchmarks'))

 target_shell = find_program('sh', dirs: program_path, native: false)

@@ -836,7 +843,7 @@ endif
 # features. It is optional if you want to neither execute tests nor use any of
 # these optional features.
 perl_required = get_option('perl')
-if get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers') or get_option('docs') != []
+if get_option('benchmarks').enabled() or get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers') or get_option('docs') != []
 perl_required = true
 endif

@@ -2082,6 +2089,7 @@ meson.add_dist_script(
 )

 summary({
+'benchmarks': get_option('tests') and perl.found() and time.found(),
 'curl': curl.found(),
 'expat': expat.found(),
 'gettext': intl.found(),

meson_options.txt

@@ -101,6 +101,8 @@ option('docs_backend', type: 'combo', choices: ['asciidoc', 'asciidoctor', 'auto
 description: 'Which backend to use to generate documentation.')

 # Testing.
+option('benchmarks', type: 'feature', value: 'auto',
+description: 'Enable benchmarks. This requires Perl and GNU time.')
 option('coccinelle', type: 'feature', value: 'auto',
 description: 'Provide a coccicheck target that generates a Coccinelle patch.')
 option('tests', type: 'boolean', value: true,

t/meson.build

@@ -1097,11 +1097,71 @@ integration_tests = [
 't9903-bash-prompt.sh',
 ]

+benchmarks = [
+'perf/p0000-perf-lib-sanity.sh',
+'perf/p0001-rev-list.sh',
+'perf/p0002-read-cache.sh',
+'perf/p0003-delta-base-cache.sh',
+'perf/p0004-lazy-init-name-hash.sh',
+'perf/p0005-status.sh',
+'perf/p0006-read-tree-checkout.sh',
+'perf/p0007-write-cache.sh',
+'perf/p0008-odb-fsync.sh',
+'perf/p0071-sort.sh',
+'perf/p0090-cache-tree.sh',
+'perf/p0100-globbing.sh',
+'perf/p1006-cat-file.sh',
+'perf/p1400-update-ref.sh',
+'perf/p1450-fsck.sh',
+'perf/p1451-fsck-skip-list.sh',
+'perf/p1500-graph-walks.sh',
+'perf/p2000-sparse-operations.sh',
+'perf/p3400-rebase.sh',
+'perf/p3404-rebase-interactive.sh',
+'perf/p4000-diff-algorithms.sh',
+'perf/p4001-diff-no-index.sh',
+'perf/p4002-diff-color-moved.sh',
+'perf/p4205-log-pretty-formats.sh',
+'perf/p4209-pickaxe.sh',
+'perf/p4211-line-log.sh',
+'perf/p4220-log-grep-engines.sh',
+'perf/p4221-log-grep-engines-fixed.sh',
+'perf/p5302-pack-index.sh',
+'perf/p5303-many-packs.sh',
+'perf/p5304-prune.sh',
+'perf/p5310-pack-bitmaps.sh',
+'perf/p5311-pack-bitmaps-fetch.sh',
+'perf/p5312-pack-bitmaps-revs.sh',
+'perf/p5313-pack-objects.sh',
+'perf/p5314-name-hash.sh',
+'perf/p5326-multi-pack-bitmaps.sh',
+'perf/p5332-multi-pack-reuse.sh',
+'perf/p5333-pseudo-merge-bitmaps.sh',
+'perf/p5550-fetch-tags.sh',
+'perf/p5551-fetch-rescan.sh',
+'perf/p5600-partial-clone.sh',
+'perf/p5601-clone-reference.sh',
+'perf/p6100-describe.sh',
+'perf/p6300-for-each-ref.sh',
+'perf/p7000-filter-branch.sh',
+'perf/p7102-reset.sh',
+'perf/p7300-clean.sh',
+'perf/p7519-fsmonitor.sh',
+'perf/p7527-builtin-fsmonitor.sh',
+'perf/p7810-grep.sh',
+'perf/p7820-grep-engines.sh',
+'perf/p7821-grep-engines-fixed.sh',
+'perf/p7822-grep-perl-character.sh',
+'perf/p9210-scalar.sh',
+'perf/p9300-fast-import-export.sh',
+]
+
 # Sanity check that we are not missing any tests present in 't/'. This check
 # only runs once at configure time and is thus best-effort, only. It is
 # sufficient to catch missing test suites in our CI though.
 foreach glob, tests : {
 't[0-9][0-9][0-9][0-9]-*.sh': integration_tests,
+'perf/p[0-9][0-9][0-9][0-9]-*.sh': benchmarks,
 'unit-tests/t-*.c': unit_test_programs,
 'unit-tests/u-*.c': clar_test_suites,
 }
@@ -1153,3 +1213,20 @@ foreach integration_test : integration_tests
 timeout: 0,
 )
 endforeach
+
+if perl.found() and time.found()
+benchmark_environment = test_environment
+benchmark_environment.set('GTIME', time.full_path())
+
+foreach benchmark : benchmarks
+benchmark(fs.stem(benchmark), shell,
+args: [
+fs.name(benchmark),
+],
+workdir: meson.current_source_dir() / 'perf',
+env: benchmark_environment,
+depends: test_dependencies + bin_wrappers,
+timeout: 0,
+)
+endforeach
+endif
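
The new `benchmarks` feature option above defaults to 'auto', so the
benchmark targets are wired up whenever Perl and GNU time are found. As
a sketch, the option can also be forced on or off at configure time
(the build directory name is just an example):

    ```
    $ meson setup build -Dbenchmarks=enabled
    $ meson configure build -Dbenchmarks=disabled
    ```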