Merge branch 'ps/meson-build-perf-bench'

The build procedure based on Meson learned to drive the
benchmarking tests.

* ps/meson-build-perf-bench:
  meson: wire up benchmarking options
  meson: wire up benchmarks
  t/perf: fix benchmarks with out-of-tree builds
  t/perf: use configured PERL_PATH
  t/perf: fix benchmarks with alternate repo formats
commit cc14ba68d7 by Junio C Hamano, 2025-05-05 14:56:24 -07:00
6 changed files with 133 additions and 14 deletions
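
With this series, a Meson build can drive the perf suite directly. A
minimal sketch of the resulting workflow, based on the options and
targets wired up below (the build directory name is illustrative):

    # Benchmarks default to 'auto' and are only enabled when both Perl
    # and GNU time are found; force them on at setup time.
    $ meson setup build -Dbenchmarks=enabled

    # Execute all benchmarks.
    $ meson test -C build -i --benchmark

    # Execute a single benchmark.
    $ meson test -C build -i --benchmark p0000-*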

meson.build

@@ -70,9 +70,15 @@
# # Execute single test interactively such that features like `debug ()` work.
# $ meson test -i --test-args='-ix' t1400-update-ref
#
-# Test execution is parallelized by default and scales with the number of
-# processor cores available. You can change the number of processes by passing
-# the `-jN` flag to `meson test`.
+# # Execute all benchmarks.
+# $ meson test -i --benchmark
+#
+# # Execute single benchmark.
+# $ meson test -i --benchmark p0000-*
+#
+# Test execution (but not benchmark execution) is parallelized by default and
+# scales with the number of processor cores available. You can change the
+# number of processes by passing the `-jN` flag to `meson test`.
#
# 4. Install the Git distribution. Again, this can be done via Meson, Ninja or
# Samurai:
@@ -235,6 +241,7 @@ git = find_program('git', dirs: program_path, native: true, required: false)
sed = find_program('sed', dirs: program_path, native: true)
shell = find_program('sh', dirs: program_path, native: true)
tar = find_program('tar', dirs: program_path, native: true)
+time = find_program('time', dirs: program_path, required: get_option('benchmarks'))

target_shell = find_program('sh', dirs: program_path, native: false)

@@ -699,11 +706,11 @@ builtin_sources += custom_target(
# build options to our tests.
build_options_config = configuration_data()
build_options_config.set('GIT_INTEROP_MAKE_OPTS', '')
-build_options_config.set('GIT_PERF_LARGE_REPO', '')
+build_options_config.set_quoted('GIT_PERF_LARGE_REPO', get_option('benchmark_large_repo'))
build_options_config.set('GIT_PERF_MAKE_COMMAND', '')
build_options_config.set('GIT_PERF_MAKE_OPTS', '')
-build_options_config.set('GIT_PERF_REPEAT_COUNT', '')
-build_options_config.set('GIT_PERF_REPO', '')
+build_options_config.set_quoted('GIT_PERF_REPEAT_COUNT', get_option('benchmark_repeat_count').to_string())
+build_options_config.set_quoted('GIT_PERF_REPO', get_option('benchmark_repo'))
build_options_config.set('GIT_TEST_CMP_USE_COPIED_CONTEXT', '')
build_options_config.set('GIT_TEST_INDEX_VERSION', '')
build_options_config.set('GIT_TEST_OPTS', '')
@@ -836,7 +843,7 @@ endif
# features. It is optional if you want to neither execute tests nor use any of
# these optional features.
perl_required = get_option('perl')
-if get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers')
+if get_option('benchmarks').enabled() or get_option('gitweb').enabled() or 'netrc' in get_option('credential_helpers')
perl_required = true
endif

@@ -2082,6 +2089,7 @@ meson.add_dist_script(
)

summary({
+'benchmarks': get_option('tests') and perl.found() and time.found(),
'curl': curl.found(),
'expat': expat.found(),
'gettext': intl.found(),

meson_options.txt

@@ -101,6 +101,14 @@ option('docs_backend', type: 'combo', choices: ['asciidoc', 'asciidoctor', 'auto
description: 'Which backend to use to generate documentation.')

# Testing.
+option('benchmarks', type: 'feature', value: 'auto',
+description: 'Enable benchmarks. This requires Perl and GNU time.')
+option('benchmark_repo', type: 'string', value: '',
+description: 'Repository to copy for the performance tests. Should be at least the size of the Git repository.')
+option('benchmark_large_repo', type: 'string', value: '',
+description: 'Large repository to copy for the performance tests. Should be at least the size of the Linux repository.')
+option('benchmark_repeat_count', type: 'integer', value: 3,
+description: 'Number of times a test should be repeated for best-of-N measurements.')
option('coccinelle', type: 'feature', value: 'auto',
description: 'Provide a coccicheck target that generates a Coccinelle patch.')
option('tests', type: 'boolean', value: true,
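
These options feed the GIT_PERF_LARGE_REPO, GIT_PERF_REPEAT_COUNT and
GIT_PERF_REPO knobs written to GIT-BUILD-OPTIONS, as seen in the
meson.build hunk above. A plausible way to point the suite at local
clones (both paths are hypothetical):

    $ meson configure build \
        -Dbenchmark_repo=/src/git.git \
        -Dbenchmark_large_repo=/src/linux.git \
        -Dbenchmark_repeat_count=5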

t/meson.build

@@ -1097,11 +1097,71 @@ integration_tests = [
't9903-bash-prompt.sh',
]

+benchmarks = [
+'perf/p0000-perf-lib-sanity.sh',
+'perf/p0001-rev-list.sh',
+'perf/p0002-read-cache.sh',
+'perf/p0003-delta-base-cache.sh',
+'perf/p0004-lazy-init-name-hash.sh',
+'perf/p0005-status.sh',
+'perf/p0006-read-tree-checkout.sh',
+'perf/p0007-write-cache.sh',
+'perf/p0008-odb-fsync.sh',
+'perf/p0071-sort.sh',
+'perf/p0090-cache-tree.sh',
+'perf/p0100-globbing.sh',
+'perf/p1006-cat-file.sh',
+'perf/p1400-update-ref.sh',
+'perf/p1450-fsck.sh',
+'perf/p1451-fsck-skip-list.sh',
+'perf/p1500-graph-walks.sh',
+'perf/p2000-sparse-operations.sh',
+'perf/p3400-rebase.sh',
+'perf/p3404-rebase-interactive.sh',
+'perf/p4000-diff-algorithms.sh',
+'perf/p4001-diff-no-index.sh',
+'perf/p4002-diff-color-moved.sh',
+'perf/p4205-log-pretty-formats.sh',
+'perf/p4209-pickaxe.sh',
+'perf/p4211-line-log.sh',
+'perf/p4220-log-grep-engines.sh',
+'perf/p4221-log-grep-engines-fixed.sh',
+'perf/p5302-pack-index.sh',
+'perf/p5303-many-packs.sh',
+'perf/p5304-prune.sh',
+'perf/p5310-pack-bitmaps.sh',
+'perf/p5311-pack-bitmaps-fetch.sh',
+'perf/p5312-pack-bitmaps-revs.sh',
+'perf/p5313-pack-objects.sh',
+'perf/p5314-name-hash.sh',
+'perf/p5326-multi-pack-bitmaps.sh',
+'perf/p5332-multi-pack-reuse.sh',
+'perf/p5333-pseudo-merge-bitmaps.sh',
+'perf/p5550-fetch-tags.sh',
+'perf/p5551-fetch-rescan.sh',
+'perf/p5600-partial-clone.sh',
+'perf/p5601-clone-reference.sh',
+'perf/p6100-describe.sh',
+'perf/p6300-for-each-ref.sh',
+'perf/p7000-filter-branch.sh',
+'perf/p7102-reset.sh',
+'perf/p7300-clean.sh',
+'perf/p7519-fsmonitor.sh',
+'perf/p7527-builtin-fsmonitor.sh',
+'perf/p7810-grep.sh',
+'perf/p7820-grep-engines.sh',
+'perf/p7821-grep-engines-fixed.sh',
+'perf/p7822-grep-perl-character.sh',
+'perf/p9210-scalar.sh',
+'perf/p9300-fast-import-export.sh',
+]
+
# Sanity check that we are not missing any tests present in 't/'. This check
# only runs once at configure time and is thus best-effort, only. It is
# sufficient to catch missing test suites in our CI though.
foreach glob, tests : {
't[0-9][0-9][0-9][0-9]-*.sh': integration_tests,
+'perf/p[0-9][0-9][0-9][0-9]-*.sh': benchmarks,
'unit-tests/t-*.c': unit_test_programs,
'unit-tests/u-*.c': clar_test_suites,
}
@@ -1153,3 +1213,20 @@ foreach integration_test : integration_tests
timeout: 0,
)
endforeach
+
+if perl.found() and time.found()
+benchmark_environment = test_environment
+benchmark_environment.set('GTIME', time.full_path())
+
+foreach benchmark : benchmarks
+benchmark(fs.stem(benchmark), shell,
+args: [
+fs.name(benchmark),
+],
+workdir: meson.current_source_dir() / 'perf',
+env: benchmark_environment,
+depends: test_dependencies + bin_wrappers,
+timeout: 0,
+)
+endforeach
+endif

t/perf/aggregate.perl

@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl

use lib '../../perl/build/lib';
use strict;

t/perf/perf-lib.sh

@@ -36,7 +36,31 @@ git_perf_settings="$(env |
s/^\\([^=]*=\\)\\(.*\\)/\\1'\\2'/p
}")"

-. ../test-lib.sh
+# While test-lib.sh computes the build directory for us, we also have to do the
+# same thing in order to locate the script via GIT-BUILD-OPTIONS in the first
+# place.
+GIT_BUILD_DIR="${GIT_BUILD_DIR:-$TEST_DIRECTORY/..}"
+if test -f "$GIT_BUILD_DIR/GIT-BUILD-DIR"
+then
+GIT_BUILD_DIR="$(cat "$GIT_BUILD_DIR/GIT-BUILD-DIR")" || exit 1
+# On Windows, we must convert Windows paths lest they contain a colon
+case "$(uname -s)" in
+*MINGW*)
+GIT_BUILD_DIR="$(cygpath -au "$GIT_BUILD_DIR")"
+;;
+esac
+fi
+
+if test ! -f "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
+then
+echo >&2 'error: GIT-BUILD-OPTIONS missing (has Git been built?).'
+exit 1
+fi
+
+. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
+. "$GIT_SOURCE_DIR"/t/test-lib.sh
+
+# Then restore GIT_PERF_* settings.
eval "$git_perf_settings"

unset GIT_CONFIG_NOSYSTEM
@@ -110,6 +134,8 @@ test_perf_create_repo_from () {
source_git="$("$MODERN_GIT" -C "$source" rev-parse --git-dir)"
objects_dir="$("$MODERN_GIT" -C "$source" rev-parse --git-path objects)"
common_dir="$("$MODERN_GIT" -C "$source" rev-parse --git-common-dir)"
+refformat="$("$MODERN_GIT" -C "$source" rev-parse --show-ref-format)"
+objectformat="$("$MODERN_GIT" -C "$source" rev-parse --show-object-format)"
mkdir -p "$repo/.git"
(
cd "$source" &&
@@ -126,7 +152,7 @@
) &&
(
cd "$repo" &&
"$MODERN_GIT" init -q &&
"$MODERN_GIT" init -q --ref-format="$refformat" --object-format="$objectformat" &&
test_perf_do_repo_symlink_config_ &&
mv .git/hooks .git/hooks-disabled 2>/dev/null &&
if test -f .git/index.lock
@@ -286,7 +312,7 @@ test_perf_ () {
else
test_ok_ "$1"
fi
"$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".result
"$PERL_PATH" "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".result
rm test_time.*
}

@@ -334,7 +360,7 @@ test_at_end_hook_ () {
if test -z "$GIT_PERF_AGGREGATING_LATER"; then
(
cd "$TEST_DIRECTORY"/perf &&
-./aggregate.perl --results-dir="$TEST_RESULTS_DIR" $(basename "$0")
+"$PERL_PATH" "$GIT_SOURCE_DIR"/t/perf/aggregate.perl --results-dir="$TEST_RESULTS_DIR" $(basename "$0")
)
fi
}
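
Probing --show-ref-format and --show-object-format before copying matters
because test_perf_create_repo_from() reuses the source repository's objects
and refs as-is; a copy initialized with the default formats could not read
them. For a SHA-256/reftable source, the fixed invocation is then roughly
equivalent to:

    $ git init -q --ref-format=reftable --object-format=sha256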

t/perf/run

@@ -192,10 +192,10 @@ run_subsection () {

if test -z "$GIT_PERF_SEND_TO_CODESPEED"
then
-./aggregate.perl --results-dir="$TEST_RESULTS_DIR" $codespeed_opt "$@"
+"$PERL_PATH" ./aggregate.perl --results-dir="$TEST_RESULTS_DIR" $codespeed_opt "$@"
else
json_res_file=""$TEST_RESULTS_DIR"/$GIT_PERF_SUBSECTION/aggregate.json"
-./aggregate.perl --results-dir="$TEST_RESULTS_DIR" --codespeed "$@" | tee "$json_res_file"
+"$PERL_PATH" ./aggregate.perl --results-dir="$TEST_RESULTS_DIR" --codespeed "$@" | tee "$json_res_file"
send_data_url="$GIT_PERF_SEND_TO_CODESPEED/result/add/json/"
curl -v --request POST --data-urlencode "json=$(cat "$json_res_file")" "$send_data_url"
fi