[bm] add plots and tables of the benchmark results

Joao Paulo Magalhaes
2022-08-26 16:51:01 +02:00
parent 0f0a66b7b7
commit 1a08a01a1e
3 changed files with 294 additions and 14 deletions


@@ -24,8 +24,48 @@ env:
jobs:
gettag:
runs-on: ubuntu-latest
steps:
# use fetch-depth to ensure all tags are fetched
- {name: checkout, uses: actions/checkout@v2, with: {submodules: recursive, fetch-depth: 0}}
- name: Variables (from tag)
if: contains(github.ref, 'tags/v')
run: |
# https://github.community/t/how-to-get-just-the-tag-name/16241/11
SRC_TAG=${GITHUB_REF#refs/tags/}
SRC_VERSION=${GITHUB_REF#refs/tags/v}
cat <<EOF > vars.sh
export SRC_TAG=$SRC_TAG
export SRC_VERSION=$SRC_VERSION
EOF
- name: Variables (from commit, no tag)
if: ${{ !contains(github.ref, 'tags/v') }}
run: |
set -x
branch_name=${GITHUB_REF#refs/heads/}
# builds triggered from PRs leave branch_name as eg refs/pull/150/merge,
# so rewrite that to eg pr0150_merge
branch_name=`echo $branch_name | sed "s:refs/pull/\([0-9]*\)/\(.*\):pr0\1_\2:"`
# sanitize the branch name; eg merge/foo-bar -> merge_foo_bar
branch_name=`echo $branch_name | sed 's:[/.-]:_:g'`
git config --global --add safe.directory $(pwd)
SRC_TAG=$(git describe || git rev-parse --short HEAD) # eg v0.2.0-110-gda837e0
SRC_VERSION="${branch_name}-${SRC_TAG}"
cat <<EOF > vars.sh
export SRC_TAG=$SRC_TAG
export SRC_VERSION=$SRC_VERSION
EOF
- name: Verify vars.sh
run: cat vars.sh ; source vars.sh ; echo $SRC_TAG ; echo $SRC_VERSION
- name: Save vars.sh
uses: actions/upload-artifact@v1
with: {name: vars.sh, path: ./vars.sh}
benchmarks:
name: bm/c++${{matrix.std}}/${{matrix.cxx}}/${{matrix.bt}}
needs: gettag
if: |
(!contains(github.event.head_commit.message, 'skip all')) ||
(!contains(github.event.head_commit.message, 'skip benchmarks')) ||
@@ -36,46 +76,62 @@ jobs:
fail-fast: false
matrix:
include:
- {std: 11, cxx: g++-10, bt: Debug , os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 11, cxx: g++-10, bt: Release, os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 17, cxx: g++-10, bt: Debug , os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 17, cxx: g++-10, bt: Release, os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 20, cxx: g++-10, bt: Debug , os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 20, cxx: g++-10, bt: Release, os: ubuntu-18.04, bitlinks: static64 static32}
- {std: 11, cxx: vs2019, bt: Debug , os: windows-2019, bitlinks: static64 static32}
- {std: 11, cxx: vs2019, bt: Release, os: windows-2019, bitlinks: static64 static32}
- {std: 17, cxx: vs2019, bt: Debug , os: windows-2019, bitlinks: static64 static32}
#
- {std: 17, cxx: vs2019, bt: Release, os: windows-2019, bitlinks: static64 static32}
- {std: 20, cxx: vs2019, bt: Debug , os: windows-2019, bitlinks: static64 static32}
- {std: 20, cxx: vs2019, bt: Release, os: windows-2019, bitlinks: static64 static32}
- {std: 17, cxx: vs2022, bt: Release, os: windows-2022, bitlinks: static64 static32}
- {std: 20, cxx: vs2022, bt: Release, os: windows-2022, bitlinks: static64 static32}
#
- {std: 17, cxx: xcode, xcver: 13, bt: Release, os: macos-11, bitlinks: static64}
env: {BM: ON, STD: "${{matrix.std}}", CXX_: "${{matrix.cxx}}", BT: "${{matrix.bt}}", BITLINKS: "${{matrix.bitlinks}}", VG: "${{matrix.vg}}", SAN: "${{matrix.san}}", LINT: "${{matrix.lint}}", OS: "${{matrix.os}}"}
steps:
# use fetch-depth to ensure all tags are fetched
- {name: checkout, uses: actions/checkout@v2, with: {submodules: recursive, fetch-depth: 0}}
- {name: install requirements, run: source .github/reqs.sh && c4_install_test_requirements $OS}
- name: Download vars.sh
uses: actions/download-artifact@v1
with: {name: vars.sh, path: ./}
- {name: show info, run: source .github/setenv.sh && c4_show_info}
- name: Install python 3.10 for plotting
uses: actions/setup-python@v2
with: { python-version: '3.10' }
- name: install benchmark plotting dependencies
run: |
which python
which pip
python --version
pip --version
pip install -v -r cmake/bm-xp/requirements.txt
python -c 'import munch ; print("ok!") ; exit(0)'
echo $?
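# the CMAKE_FLAGS exported in the configure steps below pass -DPython_EXECUTABLE,
# pointing CMake's find_package(Python) at this same interpreter, so the plot
# targets run with the dependencies installed above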
- name: shared64-configure---------------------------------------------------
-run: source .github/setenv.sh && c4_cfg_test shared64
+run: export CMAKE_FLAGS="-DPython_EXECUTABLE=$(which python)" && source .github/setenv.sh && c4_cfg_test shared64
- {name: shared64-build, run: source .github/setenv.sh && c4_build_target shared64 ryml-bm-build}
- {name: shared64-run, run: source .github/setenv.sh && c4_run_target shared64 ryml-bm-run}
- name: static64-configure---------------------------------------------------
-run: source .github/setenv.sh && c4_cfg_test static64
+run: export CMAKE_FLAGS="-DPython_EXECUTABLE=$(which python)" && source .github/setenv.sh && c4_cfg_test static64
- {name: static64-build, run: source .github/setenv.sh && c4_build_target static64 ryml-bm-build}
- {name: static64-run, run: source .github/setenv.sh && c4_run_target static64 ryml-bm-run}
- name: static32-configure---------------------------------------------------
-run: source .github/setenv.sh && c4_cfg_test static32
+run: export CMAKE_FLAGS="-DPython_EXECUTABLE=$(which python)" && source .github/setenv.sh && c4_cfg_test static32
- {name: static32-build, run: source .github/setenv.sh && c4_build_target static32 ryml-bm-build}
- {name: static32-run, run: source .github/setenv.sh && c4_run_target static32 ryml-bm-run}
- name: shared32-configure---------------------------------------------------
-run: source .github/setenv.sh && c4_cfg_test shared32
+run: export CMAKE_FLAGS="-DPython_EXECUTABLE=$(which python)" && source .github/setenv.sh && c4_cfg_test shared32
- {name: shared32-build, run: source .github/setenv.sh && c4_build_target shared32 ryml-bm-build}
- {name: shared32-run, run: source .github/setenv.sh && c4_run_target shared32 ryml-bm-run}
- name: gather benchmark results
run: |
set -x
-desc=$(git describe || git rev-parse --short HEAD)
+source vars.sh
+echo SRC_TAG=$SRC_TAG
+echo SRC_VERSION=$SRC_VERSION
+desc=$SRC_TAG
for bl in ${{matrix.bitlinks}} ; do
-dst=$(echo benchmark_results/$desc/${{matrix.cxx}}-${{matrix.bt}}-c++${{matrix.std}}-$bl | sed 's:++-:xx:g' | sed 's:+:x:g')
+dst=$(echo benchmark_results/$desc/x86_64/${{matrix.cxx}}-${{matrix.bt}}-c++${{matrix.std}}-$bl | sed 's:++-:xx:g' | sed 's:+:x:g')
mkdir -p $dst
find build -name bm-results
mv -vf build/$bl/bm/bm-results/* $dst/.

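The destination path above is sanitized only for '+' characters. A minimal Python sketch of what the two sed expressions do (the input path is hypothetical):

def sanitize(path: str) -> str:
    # mirrors: sed 's:++-:xx:g' | sed 's:+:x:g'
    return path.replace("++-", "xx").replace("+", "x")

# eg g++-10 -> gxx10 and c++17 -> cxx17:
assert (sanitize("benchmark_results/v0.5.0/x86_64/g++-10-Release-c++17-static64")
        == "benchmark_results/v0.5.0/x86_64/gxx10-Release-cxx17-static64")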

@@ -1,5 +1,9 @@
c4_setup_benchmarking()
add_custom_target(ryml-bm-plot)
c4_set_folder_remote_project_targets(bm/plot ryml-bm-plot)
find_package(Python REQUIRED COMPONENTS Interpreter)
# thirdparty libs that will be compared with ryml
set(_ed ${CMAKE_CURRENT_BINARY_DIR}/subprojects) # casual ryml extern dir (these projects are not part of ryml and are downloaded and compiled on the fly)
@@ -132,8 +136,18 @@ function(ryml_add_bm_comparison_case target name case_file)
endif()
c4_add_target_benchmark(${target} ${case}
FILTER "${filter_json}"
-ARGS ${case_file})
+ARGS ${case_file}
+RESULTS_FILE results_file)
add_dependencies(ryml-bm-${name}-all ryml-bm-${name}-${case})
_c4_set_target_folder(ryml-bm-${name}-${case} bm/run)
add_custom_target(ryml-bm-${name}-${case}-plot
#DEPENDS ${result_files}
COMMAND cmake -E echo "${Python_EXECUTABLE}"
COMMAND ${Python_EXECUTABLE} --version
COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/bm_plot_rapidyaml.py ${name} ${case} ${results_file}
)
_c4_set_target_folder(ryml-bm-${name}-${case}-plot bm/plot)
add_dependencies(ryml-bm-plot ryml-bm-${name}-${case}-plot)
endfunction()

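The generated -plot target invokes the script with the group name, the case name, and the results file(s) as positional arguments. A minimal sketch of an equivalent manual invocation, assuming hypothetical file names:

import subprocess, sys

subprocess.run(
    [sys.executable, "bm/bm_plot_rapidyaml.py",
     "parse",        # ${name}: the benchmark group, "parse" or "emit"
     "data.yml",     # ${case}: the benchmarked case file
     "/abs/path/to/bm-results/data.yml-parse.json"],  # ${results_file}
    check=True)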
bm/bm_plot_rapidyaml.py (new file, 210 lines added)

@@ -0,0 +1,210 @@
import sys
import os
import re
thisdir = os.path.dirname(os.path.abspath(__file__))
moddir = os.path.abspath(f"{thisdir}/../ext/c4core/cmake/bm-xp")
sys.path.insert(0, moddir)
import bm_plot as bm
from bm_util import first
from dataclasses import dataclass
import prettytable
def get_function_benchmark(libname, run: bm.BenchmarkRun):
for rbm in run.entries:
if rbm.meta.library == libname:
return rbm
raise Exception(f"lib not found: {libname}. Existing: {[rbm.meta.function for rbm in run.entries]}")
@dataclass
class ParseMeta:
title: str
library: str
variant: str
@classmethod
def make(cls, bm_title: str):
# eg:
#-----------------------
# RESERVE
# bm_ryml_inplace_reuse
# bm_ryml_arena_reuse
# bm_ryml_inplace
# bm_ryml_arena
# bm_libyaml_arena
# bm_libyaml_arena_reuse
# bm_libfyaml_arena
# bm_yamlcpp_arena
# bm_rapidjson_arena
# bm_rapidjson_inplace
# bm_sajson_arena
# bm_sajson_inplace
# bm_jsoncpp_arena
# bm_nlohmann_arena
#-----------------------
# EMIT
# bm_ryml_str_reserve
# bm_ryml_str
# bm_ryml_ostream
# bm_fyaml_str_reserve
# bm_fyaml_str
# bm_fyaml_ostream
# bm_yamlcpp
# bm_rapidjson
# bm_jsoncpp
# bm_nlohmann
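# compile the regex only once and cache it on the class,
# since make() is called for every benchmark entry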
if not hasattr(__class__, '_rx'):
__class__._rx = re.compile(r'bm_(ryml|libyaml|libfyaml|fyaml|yamlcpp|rapidjson|sajson|jsoncpp|nlohmann)_?(.*)')
rx = __class__._rx
if not rx.fullmatch(bm_title):
raise Exception(f"cannot understand bm title: {bm_title}")
lib = rx.sub(r'\1', bm_title)
variant = rx.sub(r'\2', bm_title)
return cls(
title=bm_title,
library=lib,
variant=variant,
)
@property
def shortname(self):
return f"{self.library}_{self.variant}"
@property
def shortparams(self):
return "params"
@property
def shorttitle(self):
return self.shortname
EmitMeta = ParseMeta
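# for illustration, given the regex above:
#   m = ParseMeta.make("bm_ryml_inplace_reuse")
#   m.library   == "ryml"
#   m.variant   == "inplace_reuse"
#   m.shortname == "ryml_inplace_reuse"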
def plot_bm_bars(bm_panel: bm.BenchmarkPanel, getref,
panel_title_human: str,
outputfile_prefix: str):
assert os.path.isabs(outputfile_prefix), outputfile_prefix
# make a comparison table
atitle = lambda run: first(run.meta).shorttitle
anchor = lambda run: f"{os.path.basename(outputfile_prefix)}-{atitle(run)}"
anchorlink = lambda run: f"<pre><a href='#{anchor(run)}'>{atitle(run)}</a></pre>"
with open(f"{outputfile_prefix}.txt", "w") as tablefile:
with open(f"{outputfile_prefix}.md", "w") as mdfile:
print(f"## {panel_title_human}\n\n<p>Data type benchmark results:</p>\n<ul>\n",
"\n".join([f" <li>{anchorlink(run)}</li>" for run in bm_panel.runs]),
"</ul>\n\n", file=mdfile)
for run in bm_panel.runs:
tabletitle = f"{outputfile_prefix}"
table = prettytable.PrettyTable(title=f"{panel_title_human}")
table.add_column("function", [m.shorttitle for m in run.meta], align="l")
for prop in ("mega_bytes_per_second", "cpu_time_ms"):
ref = getref(run)
bar_values = list(run.extract_plot_series(prop))
bar_values_rel = list(run.extract_plot_series(prop, relative_to_entry=ref))
bar_values_pc = list(run.extract_plot_series(prop, percent_of_entry=ref))
pd = bm_panel.first_run.property_plot_data(prop)
hns = pd.human_name_short
table.add_column(hns, [f"{v_:7.2f}" for v_ in bar_values], align="r")
hns = hns.replace(" (ms)", "")
table.add_column(f"{hns}(x)", [f"{v_:5.2f}x" for v_ in bar_values_rel], align="r")
table.add_column(f"{hns}(%)", [f"{v_:7.2f}%" for v_ in bar_values_pc], align="r")
print(table, "\n\n")
print(table, "\n\n", file=tablefile)
pfx_bps = f"{os.path.basename(outputfile_prefix)}-mega_bytes_per_second"
pfx_cpu = f"{os.path.basename(outputfile_prefix)}-cpu_time_ms"
print(f"""
<br/>
<br/>
---
<a id="{anchor(run)}"/>
### {panel_title_human}
* Interactive html graphs
* [MB/s](./{pfx_bps}.html)
* [CPU time](./{pfx_cpu}.html)
[![{outputfile_prefix}: MB/s](./{pfx_bps}.png)](./{pfx_bps}.png)
[![{outputfile_prefix}: CPU time](./{pfx_cpu}.png)](./{pfx_cpu}.png)
```
{table}
```
""", file=mdfile)
# make plots
for prop in ("mega_bytes_per_second", "cpu_time_ms"):
ps, ps_ = [], []
pd = bm_panel.first_run.property_plot_data(prop)
bar_label = f"{pd.human_name_short}{pd.qty_type.comment}"
outfilename = f"{outputfile_prefix}-{prop}"
for run in bm_panel.runs:
bar_names = [m.shorttitle for m in run.meta]
bar_values = list(run.extract_plot_series(prop))
runtitle = f"{outfilename}"
# to save each bokeh plot separately and also in a grid
# plot with all of them, we have to plot twice, because
# bokeh does not allow saving the same plot object twice
plotit = lambda: bm.plot_benchmark_run_as_bars(run, title=f"{panel_title_human}\n{bar_label}",
bar_names=bar_names, bar_values=bar_values, bar_label=bar_label)
# make one plot to save:
p, p_ = plotit()
bm._bokeh_save_html(f"{runtitle}.html", p)
bm._plt_save_png(f"{runtitle}.png")
bm._plt_clear()
# and another to gather:
p, p_ = plotit()
ps.append(p)
ps_.append(p_)
bm._plt_clear()
bm.bokeh_plot_many(ps, f"{outfilename}.html")
def plot_parse(dir_: str, filename, json_files):
fcase = f"parse-{filename}"
panel = bm.BenchmarkPanel(json_files, ParseMeta)
ref = lambda bmrun: get_function_benchmark("yamlcpp", run=bmrun)
plot_bm_bars(panel, ref,
f"parse benchmark: {filename}",
f"{dir_}/ryml-bm-{fcase}")
def plot_emit(dir_: str, filename, json_files):
fcase = f"emit-{filename}"
panel = bm.BenchmarkPanel(json_files, EmitMeta)
ref = lambda bmrun: get_function_benchmark("yamlcpp", run=bmrun)
plot_bm_bars(panel, ref,
f"emit benchmark: {filename}",
f"{dir_}/ryml-bm-{fcase}")
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) < 3:
raise Exception(f"usage: {sys.executable} {sys.argv[0]} <parse|emit> <filename> <jsonfile> [<jsonfile>...]")
cmd = args[0]
print(cmd)
filename = args[1]
print(filename)
json_files = args[2:]
print(json_files)
dir_ = os.path.dirname(json_files[0])
for jf in json_files:
print("jf:", jf, flush=True)
assert os.path.dirname(jf) == dir_, (os.path.dirname(jf), dir_)
assert os.path.exists(jf), jf
if cmd == "emit":
plot_emit(dir_, filename, json_files)
elif cmd == "parse":
plot_parse(dir_, filename, json_files)
else:
raise Exception(f"not implemented: {cmd}")