Run performance benchmarks as CI job #6690

Closed
79 changes: 79 additions & 0 deletions .github/workflows/performance-compare.py
@@ -0,0 +1,79 @@
#!/usr/bin/env python3

import csv
import sys
from collections import defaultdict


def compare(results):
    """Return the list of benchmarks whose base and PR results differ."""
    differences = []

    for bm in results:
        if not results[bm].get("base"):
            print("Missing base result for " + bm)
            differences.append(bm)
            continue
        if not results[bm].get("pr"):
            print("Missing pr result for " + bm)
            differences.append(bm)
            continue

        b = results[bm]["base"]
        p = results[bm]["pr"]

        if b["Result"] != p["Result"]:
            print("Verification results differ for " + bm)
            differences.append(bm)
            continue

        # Flag user-time changes of more than 1.5 seconds in either direction.
        ut_base = float(b["usertime"])
        ut_pr = float(p["usertime"])
        ut_diff = ut_pr - ut_base
        if abs(ut_diff) > 1.5:
            change = "improvement" if ut_diff < 0 else "regression"
            ut_rel = ut_diff / ut_base * 100.0
            print("Usertime {} for {}: {}s ({}%)".format(
                change, bm, round(ut_diff, 2), round(ut_rel, 2)))
            differences.append(bm)

        # maxmem is reported in kB with a "kB" suffix; strip the suffix and
        # flag changes of more than 5 MB in either direction.
        mem_base = int(b["maxmem"][:-2])
        mem_pr = int(p["maxmem"][:-2])
        mem_diff = (mem_pr - mem_base) / 1024.0
        if abs(mem_diff) > 5.0:
            change = "improvement" if mem_diff < 0 else "regression"
            # Compute the relative change in consistent units (kB over kB).
            mem_rel = (mem_pr - mem_base) / mem_base * 100.0
            print("Maxmem {} for {}: {}MB ({}%)".format(
                change, bm, round(mem_diff, 2), round(mem_rel, 2)))
            differences.append(bm)

    return differences


def main():
    base = sys.argv[1]
    pr = sys.argv[2]

    results = defaultdict(defaultdict)

    with open(base) as base_file, open(pr) as pr_file:
        base_reader = csv.DictReader(base_file)
        pr_reader = csv.DictReader(pr_file)

        # Index both result sets by benchmark name.
        for b in base_reader:
            results[b["Benchmark"]]["base"] = b
        for p in pr_reader:
            results[p["Benchmark"]]["pr"] = p

    differences = compare(results)

    for d in differences:
        print("base: " + str(results[d]["base"]))
        print("pr: " + str(results[d]["pr"]))

    return 1 if differences else 0


if __name__ == "__main__":
    rc = main()
    sys.exit(rc)
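
As a quick sanity check, the comparison logic above can be exercised with hand-written rows. The following is a minimal, hypothetical smoke test: the column names (Benchmark, Result, usertime, maxmem) and the "kB" suffix match what the script reads, while the benchmark name, the values, and the performance_compare module name (the script would have to be saved without the hyphen to be importable) are illustrative only.

from collections import defaultdict

from performance_compare import compare  # hypothetical module name

# Fabricated rows for one benchmark: user time grows from 10.0s to
# 12.0s while peak memory stays identical.
base_row = {"Benchmark": "ex1", "Result": "successful",
            "usertime": "10.0", "maxmem": "204800kB"}
pr_row = {"Benchmark": "ex1", "Result": "successful",
          "usertime": "12.0", "maxmem": "204800kB"}

results = defaultdict(defaultdict)
results["ex1"]["base"] = base_row
results["ex1"]["pr"] = pr_row

# 12.0s - 10.0s = 2.0s exceeds the 1.5s threshold, so ex1 is reported
# as a usertime regression of 2.0s (20.0%); the unchanged maxmem stays
# below the 5 MB threshold.
assert compare(results) == ["ex1"]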
86 changes: 86 additions & 0 deletions .github/workflows/performance.yaml
@@ -0,0 +1,86 @@
name: Performance benchmarking

on:
  pull_request:
    branches: [ develop ]

jobs:
  run-llbmc-benchmarks:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: recursive
          fetch-depth: 0
      - name: Fetch dependencies
        env:
          # This is needed in addition to -yq to prevent apt-get from asking for
          # user input
          DEBIAN_FRONTEND: noninteractive
        run: |
          sudo apt-get install --no-install-recommends -y build-essential flex bison maven ccache
          sudo apt-get install --no-install-recommends -y bmt poppler-utils texlive-latex-base texlive-pictures
          make -C src minisat2-download
      - name: Prepare ccache
        uses: actions/cache@v2
        with:
          path: .ccache
          key: ${{ runner.os }}-20.04-make-${{ github.ref }}-${{ github.sha }}-Performance
          restore-keys: |
            ${{ runner.os }}-20.04-make-${{ github.ref }}
            ${{ runner.os }}-20.04-make
      - name: ccache environment
        run: |
          echo "CCACHE_BASEDIR=$PWD" >> $GITHUB_ENV
          echo "CCACHE_DIR=$PWD/.ccache" >> $GITHUB_ENV
      - name: Zero ccache stats and limit in size
        run: ccache -z --max-size=500M
      - name: Build with make
        run: |
          make -C src CXX='ccache /usr/bin/g++' cbmc.dir goto-cc.dir -j2
          mkdir pr-bin
          cp src/cbmc/cbmc pr-bin/
          cp src/goto-cc/goto-cc pr-bin/
          cp .github/workflows/performance-compare.py pr-bin/
          echo "PR_BIN=$PWD/pr-bin" >> $GITHUB_ENV
      - name: Build base with make
        run: |
          git checkout -b ${{ github.ref }}
          git checkout ${{ github.base_ref }}
          git checkout -b base ${{ github.base_ref }}
          make -C src CXX='ccache /usr/bin/g++' cbmc.dir goto-cc.dir -j2
          mkdir base-bin
          cp src/cbmc/cbmc base-bin/
          cp src/goto-cc/goto-cc base-bin/
          echo "BASE_BIN=$PWD/base-bin" >> $GITHUB_ENV
      - name: Print ccache stats
        run: ccache -s
      - name: Run LLBMC benchmarks
        run: |
          git clone --depth=1 https://github.com/tautschnig/bmt.git bmt.git
          cd bmt.git/pkgs
          tar czf ../llbmc-bench.cprover-bm.tar.gz llbmc-bench
          cd ..
          cpbm unpack orig-src/llbmc-bench-vstte-2012.tgz llbmc-bench.cprover-bm.tar.gz
          cd llbmc-bench
          sed -i 's/\(ex36.*\)/\1 --unwind 11/' cprover/cbmc_opts
          export PATH=$BASE_BIN:$PATH
          cprover/rules -j2 table CONFIG=cbmc.base
          export PATH=$PR_BIN:$PATH
          rm -rf build
          cprover/rules -j2 table CONFIG=cbmc.pr
          # avoid downloading PGF as we use the Ubuntu package
          mkdir -p base pgfplots
          cpbm graph -s cprover/results.cbmc.base.csv cprover/results.cbmc.pr.csv
          pdftoppm -png results.cbmc.base_results.cbmc.pr-scatter.pdf scatter
      - uses: actions/upload-artifact@v2
        with:
          name: scatter.png
          path: bmt.git/llbmc-bench/scatter-1.png
          if-no-files-found: error
      - name: Compare results
        run: |
          cd bmt.git/llbmc-bench
          cut -d, -f2,3,5,10,13,17 cprover/results.cbmc.base.csv > base.csv
          cut -d, -f2,3,5,10,13,17 cprover/results.cbmc.pr.csv > pr.csv
          $PR_BIN/performance-compare.py base.csv pr.csv
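
The cut invocations in the final step keep six comma-separated fields from each bmt results file, which must include the Benchmark, Result, usertime and maxmem columns the comparison script reads. A rough Python equivalent, as a sketch only (cut splits on bare commas and ignores CSV quoting, while csv.reader honours it; the file names are taken from the workflow step above):

import csv

# 1-based field positions selected by `cut -d, -f2,3,5,10,13,17`.
KEEP = [2, 3, 5, 10, 13, 17]

with open("cprover/results.cbmc.base.csv") as src, \
        open("base.csv", "w", newline="") as dst:
    writer = csv.writer(dst)
    for row in csv.reader(src):
        writer.writerow(row[i - 1] for i in KEEP)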