# Adds benchmark on demand workflow (#9)
---
# On-demand benchmark workflow: runs the valkey-perf-benchmark suite against a
# PR's head commit (vs. the "unstable" baseline) when a maintainer applies the
# "run-benchmark" or "run-cluster-benchmark" label.
name: On-demand Benchmark

on:
  pull_request:
    types: [labeled]

# One global group (not per-PR): the self-hosted runner pins benchmarks to
# fixed CPU ranges, so concurrent runs would contend for the same cores.
# cancel-in-progress: false makes later requests queue instead of killing an
# in-flight benchmark.
concurrency:
  group: valkey-pr-benchmark
  cancel-in-progress: false

defaults:
  run:
    # -E: ERR trap inheritance, -e: exit on error, -u: error on unset vars,
    # -o pipefail: fail a pipeline on any stage, -x: trace commands.
    shell: 'bash -Eeuo pipefail -x {0}'

# Least privilege: the job only reads the repositories it checks out.
permissions:
  contents: read

jobs:
  benchmark:
    # Gate on the two benchmark labels; the action check is redundant with
    # `types: [labeled]` above but kept as defense in depth.
    if: |
      github.event.action == 'labeled' &&
      (github.event.label.name == 'run-benchmark' || github.event.label.name == 'run-cluster-benchmark')
    runs-on: [self-hosted, ec2-ubuntu-24.04-benchmarking]
    steps:
      - name: Checkout valkey
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        with:
          path: valkey
          # Full history so the benchmark can also build the "unstable"
          # baseline ref, not just the PR head.
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.sha }}
          clean: true

      - name: Checkout valkey-perf-benchmark
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
        with:
          repository: valkey-io/valkey-perf-benchmark
          path: valkey-perf-benchmark
          fetch-depth: 1
          clean: true

      - name: Set up Python
        # NOTE(review): tag-pinned while the checkout steps are SHA-pinned —
        # consider pinning to a commit SHA for consistency/supply-chain safety.
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install dependencies
        working-directory: valkey-perf-benchmark
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run benchmarks
        working-directory: valkey-perf-benchmark
        env:
          PR_SHA: ${{ github.event.pull_request.head.sha }}
          LABEL_NAME: ${{ github.event.label.name }}
        run: |
          # Pick the config file matching the label that triggered the run.
          if [[ "$LABEL_NAME" == "run-benchmark" ]]; then
            CONFIG_FILE="../valkey/.github/benchmark_configs/test_config.json"
          elif [[ "$LABEL_NAME" == "run-cluster-benchmark" ]]; then
            CONFIG_FILE="./configs/benchmark-configs-cluster-tls.json"
          else
            echo "Unknown label: $LABEL_NAME"; exit 1
          fi
          # Fixed CPU ranges isolate server and client load on the dedicated
          # benchmarking host (see the concurrency group above).
          python ./benchmark.py \
            --commits "$PR_SHA" \
            --baseline "unstable" \
            --results-dir "results" \
            --valkey-path "../valkey" \
            --config "$CONFIG_FILE" \
            --server-cpu-range 92-93 \
            --client-cpu-range 94-95

      - name: Compare results
        working-directory: valkey-perf-benchmark
        env:
          PR_SHA: ${{ github.event.pull_request.head.sha }}
        run: |
          # Writes a markdown diff of baseline vs. PR metrics to the
          # workspace root so the upload step below can pick it up.
          python ./utils/compare_benchmark_results.py \
            ./results/unstable/metrics.json \
            "./results/$PR_SHA/metrics.json" \
            ../comparison.md

      - name: Upload artifacts
        # Upload whatever was produced even if an earlier step failed,
        # to aid debugging partial runs.
        if: always()
        # NOTE(review): tag-pinned — consider SHA-pinning like checkout.
        uses: actions/upload-artifact@v4
        with:
          name: pr-${{ github.event.pull_request.number }}-bench-${{ github.run_id }}
          path: |
            ./valkey-perf-benchmark/results/${{ github.event.pull_request.head.sha }}/metrics.json
            ./valkey-perf-benchmark/results/unstable/metrics.json
            ./comparison.md