Integration of vfc_ci

The vfc_ci tool has been added directly to the repository, since it is
not integrated into Verificarlo yet. The vfc_test_h5.cpp file defines a
test inspired by test_h5.cpp that reads a list of cycles and dumps the
vfc_probes for these cycles.
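For context, the core of such a test might look like the following minimal sketch. It assumes the vfc_probes C API (vfc_init_probes / vfc_put_probe / vfc_dump_probes, the latter two being referenced in ci/test.py below); the cycle list, the update routine and the probe names are illustrative placeholders for what vfc_test_h5.cpp actually reads from the HDF5 dataset:

    #include <string>
    #include <vector>
    #include <vfc_probes.h>

    // Placeholder for the update routine under test (cf. the SM_*.cpp sources)
    double update_determinant(unsigned int cycle);

    int main() {
        vfc_probes probes = vfc_init_probes();
        std::vector<unsigned int> cycles = {/* read from the dataset */};
        for (unsigned int cycle : cycles) {
            std::string test = "cycle_" + std::to_string(cycle);
            double determinant = update_determinant(cycle);
            // One probe per (test, variable) pair, dumped as CSV for ci/test.py
            vfc_put_probe(&probes, &test[0], (char *)"determinant", determinant);
        }
        vfc_dump_probes(&probes);
        return 0;
    }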
This commit is contained in:
Aurélien Delval 2021-04-29 23:41:23 +02:00
parent bd9bd62fe9
commit 1d1da00931
30 changed files with 3897 additions and 0 deletions

2
.gitignore vendored

@@ -10,3 +10,5 @@ Slater*
Updates*
datasets/dataset.*
bin/
*.vfcrun.hd5
*.vfcraw.hd5

64
Makefile.vfc_ci Normal file

@@ -0,0 +1,64 @@
## Compiler
CXX = verificarlo-c++
## Compiler flags
H5FLAGS = -I/usr/include/hdf5/serial -lhdf5_serial -lhdf5_cpp
# H5FLAGS = -lhdf5 -lhdf5_cpp
CXXFLAGS = -O0 -g $(H5FLAGS)
INCLUDE = -I $(INC_DIR)/ -I="/usr/include"
DEPS_CXX = $(OBJ_DIR)/SM_MaponiA3.o $(OBJ_DIR)/SM_MaponiA3S.o $(OBJ_DIR)/SM_Standard.o $(OBJ_DIR)/SM_Helpers.o
SRC_DIR := src
TST_DIR := tests
INC_DIR := include
OBJ_DIR := build
BIN_DIR := bin
EXEC := $(BIN_DIR)/vfc_test_h5
## Build targets
.PHONY: all clean distclean
all: $(EXEC)
clean:
@rm -vrf $(OBJ_DIR)
@rm -vrf $(BIN_DIR)
distclean: clean
@rm -vrf $(BIN_DIR) \
Slater* Updates.dat
#### COMPILING
$(BIN_DIR) $(OBJ_DIR):
mkdir -p $@
### IMPLICIT BUILD RULES
## C++ objects
$(OBJ_DIR)/%.o: $(TST_DIR)/%.cpp $(INC_DIR)/* | $(OBJ_DIR)
$(CXX) $(CXXFLAGS) $(INCLUDE) -c -o $@ $<
$(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp $(INC_DIR)/* | $(OBJ_DIR)
$(CXX) $(CXXFLAGS) -fPIE $(INCLUDE) -c -o $@ $<
## HDF5/C++ objects
$(OBJ_DIR)/%_h5.o: $(TST_DIR)/%_h5.cpp $(INC_DIR)/* | $(OBJ_DIR)
	$(CXX) $(CXXFLAGS) $(INCLUDE) -c -o $@ $<
### EXPLICIT BUILD RULES
## The -fPIC compiler flag is required here, otherwise the h5c++ builds fail
$(OBJ_DIR)/SM_MaponiA3.o: $(SRC_DIR)/SM_MaponiA3.cpp $(INC_DIR)/* | $(OBJ_DIR)
$(CXX) $(CXXFLAGS) -fPIC $(INCLUDE) -c -o $@ $<
$(OBJ_DIR)/SM_MaponiA3S.o: $(SRC_DIR)/SM_MaponiA3S.cpp $(INC_DIR)/* | $(OBJ_DIR)
$(CXX) $(CXXFLAGS) -fPIC $(INCLUDE) -c -o $@ $<
$(OBJ_DIR)/SM_Standard.o: $(SRC_DIR)/SM_Standard.cpp $(INC_DIR)/* | $(OBJ_DIR)
$(CXX) $(CXXFLAGS) -fPIC $(INCLUDE) -c -o $@ $<
#### LINKING
$(BIN_DIR)/vfc_test_h5: $(OBJ_DIR)/vfc_test_h5.o $(DEPS_CXX) | $(BIN_DIR)
$(CXX) $(CXXFLAGS) $(INCLUDE) -o $@ $^ $(H5FLAGS)
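## Typical invocation, from the repository root (assuming the HDF5 paths set
## in H5FLAGS above match the system): make -f Makefile.vfc_ci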

0
ci/__init__.py Normal file

28
ci/serve.py Executable file

@@ -0,0 +1,28 @@
# Server for the Verificarlo CI report. This is simply a wrapper to avoid
# calling Bokeh directly.
import os
def serve(show, git_directory, git_url, port, allow_origin, logo_url):
# Prepare arguments
show = "--show" if show else ""
git = ""
    if git_directory is not None:
        git = "git directory %s" % git_directory
    if git_url is not None:
        git = "git url %s" % git_url
    logo = ""
    if logo_url is not None:
        logo = "logo %s" % logo_url
dirname = os.path.dirname(__file__)
# Call the "bokeh serve" command on the system
command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
% (dirname, show, allow_origin, port, port, git, logo)
os.system(command)
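# For reference, a call like serve(show=True, git_directory=None, git_url=None,
# port=8080, allow_origin="localhost", logo_url=None) would run (with dirname
# expanded to this file's directory):
# bokeh serve <dirname>/vfc_ci_report --show \
#     --allow-websocket-origin=localhost:8080 --port 8080 --args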

155
ci/setup.py Normal file

@@ -0,0 +1,155 @@
# Helper script to set up Verificarlo CI on the current branch
import git
import sys
import os
from jinja2 import Environment, FileSystemLoader
################################################################################
# Helper functions
def gen_readme(dev_branch, ci_branch):
# Init template loader
path = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(path))
template = env.get_template("workflow_templates/ci_README.j2.md")
# Render template
render = template.render(dev_branch=dev_branch, ci_branch=ci_branch)
# Write template
with open("README.md", "w") as fh:
fh.write(render)
def gen_workflow(git_host, dev_branch, ci_branch, repo):
# Init template loader
path = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(path))
if git_host == "github":
# Load template
template = env.get_template("workflow_templates/vfc_test_workflow.j2.yml")
# Render it
render = template.render(dev_branch=dev_branch, ci_branch=ci_branch)
# Write the file
filename = ".github/workflows/vfc_test_workflow.yml"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as fh:
fh.write(render)
if git_host == "gitlab":
template = env.get_template("workflow_templates/gitlab-ci.j2.yml")
# Ask for the user who will run the jobs (Gitlab specific)
username = input("[vfc_ci] Enter the name of the user who will run the CI jobs:")
email = input("[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
remote_url = repo.remotes[0].config_reader.get("url")
remote_url = remote_url.replace("http://", "")
remote_url = remote_url.replace("https://", "")
render = template.render(
dev_branch=dev_branch,
ci_branch=ci_branch,
username=username,
email=email,
remote_url = remote_url
)
filename = ".gitlab-ci.yml"
with open(filename, "w") as fh:
fh.write(render)
################################################################################
def setup(git_host):
# Init repo and make sure that the workflow setup is possible
repo = git.Repo(".")
repo.remotes.origin.fetch()
# Make sure that repository is clean
assert(not repo.is_dirty()), "Error [vfc_ci]: Unstaged changes detected " \
"in your work tree."
dev_branch = repo.active_branch
dev_branch_name = str(dev_branch)
dev_remote = dev_branch.tracking_branch()
# Make sure that the active branch (on which to setup the workflow) has a remote
    assert(dev_remote is not None), "Error [vfc_ci]: The current branch doesn't " \
"have a remote."
# Make sure that we are not behind the remote (so we can push safely later)
rev = "%s...%s" % (dev_branch_name, str(dev_remote))
commits_behind = list(repo.iter_commits(rev))
assert(commits_behind == []), "Error [vfc_ci]: The local branch seems " \
"to be at least one commit behind remote."
# Commit the workflow on the current (dev) branch
ci_branch_name = "vfc_ci_%s" % dev_branch_name
gen_workflow(git_host, dev_branch_name, ci_branch_name, repo)
repo.git.add(".")
repo.index.commit("[auto] Set up Verificarlo CI on this branch")
repo.remote(name="origin").push()
# Create the CI branch (orphan branch with a readme on it)
# (see : https://github.com/gitpython-developers/GitPython/issues/615)
    repo.head.reference = git.Head(repo, "refs/heads/" + ci_branch_name)
repo.index.remove(["*"])
gen_readme(dev_branch_name, ci_branch_name)
repo.index.add(["README.md"])
repo.index.commit(
"[auto] Create the Verificarlo CI branch for %s" % dev_branch_name,
parent_commits=None
)
repo.remote(name="origin").push(
refspec="%s:%s" % (ci_branch_name, ci_branch_name)
)
# Force checkout back to the original (dev) branch
repo.git.checkout(dev_branch_name, force=True)
# Print termination messages
print(
"Info [vfc_ci]: A Verificarlo CI workflow has been setup on " \
"%s." % dev_branch_name
)
print(
"Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on " \
"this branch. You can also perform a \"vfc_ci test\" dry run before "\
"pushing other commits."
)
if git_host == "gitlab":
print(
"Info [vfc_ci]: Since you are using GitLab, make sure that you " \
"have created an access token for the user you specified (registered "\
"as a variable called \"CI_PUSH_TOKEN\" in your repository)."
)

387
ci/test.py Executable file

@@ -0,0 +1,387 @@
# This script reads the vfc_tests_config.json file and executes tests accordingly
# It will also generate a ... .vfcrun.hd5 file with the results of the run
import os
import json
import calendar
import time
# Forcing an older pickle protocol ensures that HDF5 files written with
# Python 3.8+ can still be read with older versions of Python
import pickle
pickle.HIGHEST_PROTOCOL = 4
import pandas as pd
import numpy as np
import scipy.stats
import sigdigits as sd
# Magic numbers
min_pvalue = 0.05
max_zscore = 3
################################################################################
# Helper functions
# Read a CSV file outputted by vfc_probe as a Pandas dataframe
def read_probes_csv(filepath, backend, warnings, execution_data):
try:
results = pd.read_csv(filepath)
except FileNotFoundError:
print(
"Warning [vfc_ci]: Probes not found, your code might have crashed " \
"or you might have forgotten to call vfc_dump_probes"
)
warnings.append(execution_data)
return pd.DataFrame(
columns = ["test", "variable", "values", "vfc_backend"]
)
except Exception:
print(
"Warning [vfc_ci]: Your probes could not be read for some unknown " \
"reason"
)
warnings.append(execution_data)
return pd.DataFrame(
columns = ["test", "variable", "values", "vfc_backend"]
)
if len(results) == 0:
print(
"Warning [vfc_ci]: Probes empty, it looks like you have dumped " \
"them without calling vfc_put_probe"
)
warnings.append(execution_data)
# Once the CSV has been opened and validated, return its content
results["value"] = results["value"].apply(lambda x: float.fromhex(x))
results.rename(columns = {"value":"values"}, inplace = True)
results["vfc_backend"] = backend
return results
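# For reference, the probes CSV dumped by vfc_probes is expected to look like
# this (the "value" column holds C99 hex floats, hence the float.fromhex
# conversion above; the test/variable names are hypothetical):
#
# test,variable,value
# cycle_42,determinant,0x1.5bf0a8b145769p+1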
# Wrappers to sd.significant_digits (returns results in base 2)
def significant_digits(x):
    # Here x is a row of the dataframe, so x.values is the row's underlying
    # array (and not the column named "values"): the distribution is accessed
    # by position
distribution = x.values[3]
distribution = distribution.reshape(len(distribution), 1)
# The distribution's empirical average will be used as the reference
mu = np.array([x.mu])
# If the null hypothesis is rejected, call sigdigits with General mode:
if x.pvalue < min_pvalue:
method = sd.Method.General
s = sd.significant_digits(
distribution,
mu,
precision=sd.Precision.Absolute,
method=method
)
# Else, manually compute sMCA which is equivalent to a 66% confidence interval
else:
method = sd.Method.CNH
s = sd.significant_digits(
distribution,
mu,
precision=sd.Precision.Absolute,
method=method,
probability=0.66,
confidence=0.66,
)
# s is returned as a size 1 list
return s[0]
def significant_digits_lower_bound(x):
# If the null hypothesis is rejected, no lower bound
if x.pvalue < min_pvalue:
return x.s2
# Else, the lower bound will be a 95% confidence interval
distribution = x.values[3]
distribution = distribution.reshape(len(distribution), 1)
mu = np.array([x.mu])
s = sd.significant_digits(
distribution,
mu,
precision=sd.Precision.Absolute,
method=sd.Method.CNH,
)
return s[0]
################################################################################
# Main functions
# Open and read the tests config file
def read_config():
try:
with open("vfc_tests_config.json", "r") as file:
data = file.read()
except FileNotFoundError as e:
e.strerror = "Error [vfc_ci]: This file is required to describe the tests "\
"to run and generate a Verificarlo run file"
raise e
return json.loads(data)
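# For reference, a minimal vfc_tests_config.json could look like this (the
# fields are the ones read in run_tests() below; the backend name and
# repetition count are illustrative):
#
# {
#     "make_command": "make -f Makefile.vfc_ci",
#     "executables": [{
#         "executable": "bin/vfc_test_h5",
#         "parameters": "",
#         "vfc_backends": [{
#             "name": "libinterflop_mca.so",
#             "repetitions": 50
#         }]
#     }]
# }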
# Set up metadata
def generate_metadata(is_git_commit):
# Metadata and filename are initiated as if no commit was associated
metadata = {
"timestamp": calendar.timegm(time.gmtime()),
"is_git_commit": is_git_commit,
"hash": "",
"author": "",
"message": ""
}
if is_git_commit:
print("Fetching metadata from last commit...")
from git import Repo
repo = Repo(".")
head_commit = repo.head.commit
metadata["timestamp"] = head_commit.authored_date
metadata["hash"] = str(head_commit)[0:7]
metadata["author"] = "%s <%s>" \
% (str(head_commit.author), head_commit.author.email)
metadata["message"] = head_commit.message.split("\n")[0]
return metadata
# Execute tests and collect results in a Pandas dataframe (+ dataprocessing)
def run_tests(config):
# Run the build command
print("Info [vfc_ci]: Building tests...")
os.system(config["make_command"])
# This is an array of Pandas dataframes for now
data = []
# Create tmp folder to export results
os.system("mkdir .vfcruns.tmp")
n_files = 0
# This will contain all executables/repetition numbers from which we could
# not get any data
warnings = []
# Tests execution loop
for executable in config["executables"]:
print("Info [vfc_ci]: Running executable :", executable["executable"], "...")
parameters = ""
if "parameters" in executable:
parameters = executable["parameters"]
for backend in executable["vfc_backends"]:
export_backend = "VFC_BACKENDS=\"" + backend["name"] + "\" "
command = "./" + executable["executable"] + " " + parameters
repetitions = 1
if "repetitions" in backend:
repetitions = backend["repetitions"]
# Run test repetitions and save results
for i in range(repetitions):
file = ".vfcruns.tmp/%s.csv" % str(n_files)
export_output = "VFC_PROBES_OUTPUT=\"%s\" " % file
os.system(export_output + export_backend + command)
# This will only be used if we need to append this exec to the
# warnings list
execution_data = {
"executable": executable["executable"],
"backend": backend["name"],
"repetition": i + 1
}
data.append(read_probes_csv(
file,
backend["name"],
warnings,
execution_data
))
n_files = n_files + 1
# Clean CSV output files (by deleting the tmp folder)
os.system("rm -rf .vfcruns.tmp")
# Combine all separate executions in one dataframe
data = pd.concat(data, sort=False, ignore_index=True)
data = data.groupby(["test", "vfc_backend", "variable"])\
.values.apply(list).reset_index()
# Make sure we have some data to work on
assert(len(data) != 0), "Error [vfc_ci]: No data have been generated " \
"by your tests executions, aborting run without writing results file"
return data, warnings
# Data processing
def data_processing(data):
data["values"] = data["values"].apply(lambda x: np.array(x).astype(float))
# Get empirical average, standard deviation and p-value
data["mu"] = data["values"].apply(np.average)
data["sigma"] = data["values"].apply(np.std)
data["pvalue"] = data["values"].apply(lambda x: scipy.stats.shapiro(x).pvalue)
# Significant digits
data["s2"] = data.apply(significant_digits, axis=1)
data["s10"] = data["s2"].apply(lambda x: sd.change_base(x, 10))
# Lower bound of the confidence interval using the sigdigits module
data["s2_lower_bound"] = data.apply(significant_digits_lower_bound, axis=1)
data["s10_lower_bound"] = data["s2_lower_bound"].apply(lambda x: sd.change_base(x, 10))
    # Compute the moments of the distribution
    # (the values are sorted first, to compute the quantiles)
data["values"] = data["values"].apply(np.sort)
data["mu"] = data["values"].apply(np.average)
data["min"] = data["values"].apply(np.min)
data["quantile25"] = data["values"].apply(np.quantile, args=(0.25,))
data["quantile50"] = data["values"].apply(np.quantile, args=(0.50,))
data["quantile75"] = data["values"].apply(np.quantile, args=(0.75,))
data["max"] = data["values"].apply(np.max)
data["nsamples"] = data["values"].apply(len)
# Display all executions that resulted in a warning
def show_warnings(warnings):
if len(warnings) > 0:
print(
"Warning [vfc_ci]: Some of your runs could not generate any data " \
"(for instance because your code crashed) and resulted in "
"warnings. Here is the complete list :"
)
for i in range(0, len(warnings)):
print("- Warning %s:" % i)
print(" Executable: %s" % warnings[i]["executable"])
print(" Backend: %s" % warnings[i]["backend"])
print(" Repetition: %s" % warnings[i]["repetition"])
################################################################################
# Entry point
def run(is_git_commit, export_raw_values, dry_run):
# Get config, metadata and data
print("Info [vfc_ci]: Reading tests config file...")
config = read_config()
print("Info [vfc_ci]: Generating run metadata...")
metadata = generate_metadata(is_git_commit)
data, warnings = run_tests(config)
show_warnings(warnings)
# Data processing
print("Info [vfc_ci]: Processing data...")
data_processing(data)
# Prepare data for export (by creating a proper index and linking run timestamp)
data = data.set_index(["test", "variable", "vfc_backend"]).sort_index()
data["timestamp"] = metadata["timestamp"]
filename = metadata["hash"] if is_git_commit else str(metadata["timestamp"])
# Prepare metadata for export
metadata = pd.DataFrame.from_dict([metadata])
metadata = metadata.set_index("timestamp")
# NOTE : Exporting to HDF5 requires to install "tables" on the system
# Export raw data if needed
if export_raw_values and not dry_run:
data.to_hdf(filename + ".vfcraw.hd5", key="data")
metadata.to_hdf(filename + ".vfcraw.hd5", key="metadata")
# Export data
del data["values"]
if not dry_run:
data.to_hdf(filename + ".vfcrun.hd5", key="data")
metadata.to_hdf(filename + ".vfcrun.hd5", key="metadata")
# Print termination messages
print(
"Info [vfc_ci]: The results have been successfully written to " \
"%s.vfcrun.hd5." \
% filename
)
if export_raw_values:
print(
"Info [vfc_ci]: A file containing the raw values has also been " \
"created : %s.vfcraw.hd5."
% filename
)
if dry_run:
print(
"Info [vfc_ci]: The dry run flag was enabled, so no files were " \
"actually created."
)

556
ci/vfc_ci_report/compare_runs.py Normal file

@@ -0,0 +1,556 @@
# Manage the view comparing a variable over different runs
import time
import pandas as pd
from math import pi
from bokeh.plotting import figure, curdoc
from bokeh.embed import components
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool, \
TextInput, CheckboxGroup, TapTool, CustomJS
import helper
import plot
################################################################################
class CompareRuns:
# Helper functions related to CompareRuns
# From an array of timestamps, returns the array of runs names (for the x
# axis ticks), as well as the metadata (in a dict of arrays) associated to
# this array (for the tooltips)
def gen_x_series(self, timestamps):
# Initialize the objects to return
        x_series = []
x_metadata = dict(
date = [],
is_git_commit = [],
hash = [],
author = [],
message = []
)
# n == 0 means we want all runs, we also make sure not to go out of
# bound if asked for more runs than we have
n = self.current_n_runs
if n == 0 or n > len(timestamps):
n = len(timestamps)
for i in range(0, n):
# Get metadata associated to this run
row_metadata = helper.get_metadata(self.metadata, timestamps[-i-1])
date = time.ctime(timestamps[-i-1])
            # Fill the x series
            x_series.insert(0, row_metadata["name"])
# Fill the metadata lists
x_metadata["date"].insert(0, date)
x_metadata["is_git_commit"].insert(0, row_metadata["is_git_commit"])
x_metadata["hash"].insert(0, row_metadata["hash"])
x_metadata["author"].insert(0, row_metadata["author"])
x_metadata["message"].insert(0, row_metadata["message"])
return x_series, x_metadata
# Plots update function
def update_plots(self):
# Select all data matching current test/var/backend
runs = self.data.loc[
[self.widgets["select_test"].value],
self.widgets["select_var"].value, self.widgets["select_backend"].value
]
timestamps = runs["timestamp"]
x_series, x_metadata = self.gen_x_series(timestamps.sort_values())
# Update source
main_dict = runs.to_dict("series")
main_dict["x"] = x_series
# Add metadata (for tooltip)
main_dict.update(x_metadata)
# Select the last n runs only
n = self.current_n_runs
main_dict = {key:value[-n:] for key, value in main_dict.items()}
# Generate ColumnDataSources for the 3 dotplots
for stat in ["sigma", "s10", "s2"]:
dict = {
"%s_x" % stat: main_dict["x"],
"is_git_commit": main_dict["is_git_commit"],
"date": main_dict["date"],
"hash": main_dict["hash"],
"author": main_dict["author"],
"message": main_dict["message"],
stat: main_dict[stat],
"nsamples": main_dict["nsamples"],
}
if stat == "s10" or stat == "s2":
dict["%s_lower_bound" % stat] = main_dict["%s_lower_bound" % stat]
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_compare"].active) > 0:
outliers = helper.detect_outliers(dict[stat])
dict[stat] = helper.remove_outliers(dict[stat], outliers)
dict["%s_x" % stat] = helper.remove_outliers(dict["%s_x" % stat], outliers)
# Assign ColumnDataSource
self.sources["%s_source" % stat].data = dict
# Generate ColumnDataSource for the boxplot
dict = {
"is_git_commit": main_dict["is_git_commit"],
"date": main_dict["date"],
"hash": main_dict["hash"],
"author": main_dict["author"],
"message": main_dict["message"],
"x": main_dict["x"],
"min" : main_dict["min"],
"quantile25" : main_dict["quantile25"],
"quantile50" : main_dict["quantile50"],
"quantile75" : main_dict["quantile75"],
"max" : main_dict["max"],
"mu" : main_dict["mu"],
"pvalue" : main_dict["pvalue"],
"nsamples": main_dict["nsamples"]
}
self.sources["boxplot_source"].data = dict
# Update x_ranges
helper.reset_x_range(self.plots["boxplot"], self.sources["boxplot_source"].data["x"])
helper.reset_x_range(self.plots["sigma_plot"], self.sources["sigma_source"].data["sigma_x"])
helper.reset_x_range(self.plots["s10_plot"], self.sources["s10_source"].data["s10_x"])
helper.reset_x_range(self.plots["s2_plot"], self.sources["s2_source"].data["s2_x"])
# Widgets' callback functions
def update_test(self, attrname, old, new):
# If the value is updated by the CustomJS, self.widgets["select_var"].value
# won't be updated, so we have to look for that case and assign it manually
# "new" should be a list when updated by CustomJS
        if isinstance(new, list):
# If filtering removed all options, we might have an empty list
# (in this case, we just skip the callback and do nothing)
if len(new) > 0:
new = new[0]
else:
return
if new != self.widgets["select_test"].value:
# The callback will be triggered again with the updated value
self.widgets["select_test"].value = new
return
# New list of available vars
self.vars = self.data.loc[new]\
.index.get_level_values("variable").drop_duplicates().tolist()
self.widgets["select_var"].options = self.vars
# Reset var selection if old one is not available in new vars
if self.widgets["select_var"].value not in self.vars:
self.widgets["select_var"].value = self.vars[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_var("", "", self.widgets["select_var"].value)
def update_var(self, attrname, old, new):
# If the value is updated by the CustomJS, self.widgets["select_var"].value
# won't be updated, so we have to look for that case and assign it manually
# new should be a list when updated by CustomJS
        if isinstance(new, list):
new = new[0]
if new != self.widgets["select_var"].value:
# The callback will be triggered again with the updated value
self.widgets["select_var"].value = new
return
# New list of available backends
self.backends = self.data.loc[self.widgets["select_test"].value, self.widgets["select_var"].value]\
.index.get_level_values("vfc_backend").drop_duplicates().tolist()
self.widgets["select_backend"].options = self.backends
# Reset backend selection if old one is not available in new backends
if self.widgets["select_backend"].value not in self.backends:
self.widgets["select_backend"].value = self.backends[0]
# The update_backend callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_backend("", "", self.widgets["select_backend"].value)
def update_backend(self, attrname, old, new):
# Simply update plots, since no other data is affected
self.update_plots()
def update_n_runs(self, attrname, old, new):
# Simply update runs selection (value and string display)
self.select_n_runs.value = new
self.current_n_runs = self.n_runs_dict[self.select_n_runs.value]
self.update_plots()
def update_outliers_filtering(self, attrname, old, new):
self.update_plots()
# Bokeh setup functions
def setup_plots(self):
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Custom JS callback that will be used when tapping on a run
# Only switches the view, a server callback is required to update plots
# (defined inside template to avoid bloating server w/ too much JS code)
js_tap_callback = "goToInspectRuns();"
# Box plot
self.plots["boxplot"] = figure(
name="boxplot", title="Variable distribution over runs",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
box_tooltips = [
("Git commit", "@is_git_commit"),
("Date", "@date"),
("Hash", "@hash"),
("Author", "@author"),
("Message", "@message"),
("Min", "@min{%0.18e}"),
("Max", "@max{%0.18e}"),
("1st quartile", "@quantile25{%0.18e}"),
("Median", "@quantile50{%0.18e}"),
("3rd quartile", "@quantile75{%0.18e}"),
("μ", "@mu{%0.18e}"),
("p-value", "@pvalue"),
("Number of samples", "@nsamples")
]
box_tooltips_formatters = {
"@min" : "printf",
"@max" : "printf",
"@quantile25" : "printf",
"@quantile50" : "printf",
"@quantile75" : "printf",
"@mu" : "printf"
}
plot.fill_boxplot(
self.plots["boxplot"], self.sources["boxplot_source"],
tooltips = box_tooltips,
tooltips_formatters = box_tooltips_formatters,
js_tap_callback = js_tap_callback,
server_tap_callback = self.inspect_run_callback_boxplot,
)
self.doc.add_root(self.plots["boxplot"])
# Sigma plot (bar plot)
self.plots["sigma_plot"] = figure(
name="sigma_plot", title="Standard deviation σ over runs",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
sigma_tooltips = [
("Git commit", "@is_git_commit"),
("Date", "@date"),
("Hash", "@hash"),
("Author", "@author"),
("Message", "@message"),
("σ", "@sigma"),
("Number of samples", "@nsamples")
]
plot.fill_dotplot(
self.plots["sigma_plot"], self.sources["sigma_source"], "sigma",
tooltips = sigma_tooltips,
js_tap_callback = js_tap_callback,
server_tap_callback = self.inspect_run_callback_sigma,
lines = True
)
self.doc.add_root(self.plots["sigma_plot"])
# s plot (bar plot with 2 tabs)
self.plots["s10_plot"] = figure(
name="s10_plot", title="Significant digits s over runs",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
s10_tooltips = [
("Git commit", "@is_git_commit"),
("Date", "@date"),
("Hash", "@hash"),
("Author", "@author"),
("Message", "@message"),
("s", "@s10"),
("s lower bound", "@s10_lower_bound"),
("Number of samples", "@nsamples")
]
plot.fill_dotplot(
self.plots["s10_plot"], self.sources["s10_source"], "s10",
tooltips = s10_tooltips,
js_tap_callback = js_tap_callback,
server_tap_callback = self.inspect_run_callback_s10,
lines = True,
lower_bound=True
)
s10_tab = Panel(child=self.plots["s10_plot"], title="Base 10")
self.plots["s2_plot"] = figure(
name="s2_plot", title="Significant digits s over runs",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
s2_tooltips = [
("Git commit", "@is_git_commit"),
("Date", "@date"),
("Hash", "@hash"),
("Author", "@author"),
("Message", "@message"),
("s", "@s2"),
("s lower bound", "@s2_lower_bound"),
("Number of samples", "@nsamples")
]
plot.fill_dotplot(
self.plots["s2_plot"], self.sources["s2_source"], "s2",
tooltips = s2_tooltips,
js_tap_callback = js_tap_callback,
server_tap_callback = self.inspect_run_callback_s2,
lines = True,
lower_bound=True
)
s2_tab = Panel(child=self.plots["s2_plot"], title="Base 2")
s_tabs = Tabs(
name = "s_tabs",
tabs=[s10_tab, s2_tab],
tabs_location = "below"
)
self.doc.add_root(s_tabs)
def setup_widgets(self):
# Initial selections
# Test/var/backend combination (we select all first elements at init)
self.tests = self.data\
.index.get_level_values("test").drop_duplicates().tolist()
self.vars = self.data.loc[self.tests[0]]\
.index.get_level_values("variable").drop_duplicates().tolist()
self.backends = self.data.loc[self.tests[0], self.vars[0]]\
.index.get_level_values("vfc_backend").drop_duplicates().tolist()
# Custom JS callback that will be used client side to filter selections
filter_callback_js = """
selector.options = options.filter(e => e.includes(cb_obj.value));
"""
# Test selector widget
# Number of runs to display
# The dict structure allows us to get int value from the display string
# in O(1)
self.n_runs_dict = {
"Last 3 runs": 3,
"Last 5 runs": 5,
"Last 10 runs": 10,
"All runs": 0
}
# Contains all options strings
n_runs_display = list(self.n_runs_dict.keys())
        # Will be used when updating plots (contains the actual number to display)
self.current_n_runs = self.n_runs_dict[n_runs_display[1]]
# Selector widget
self.widgets["select_test"] = Select(
name="select_test", title="Test :",
value=self.tests[0], options=self.tests
)
self.doc.add_root(self.widgets["select_test"])
self.widgets["select_test"].on_change("value", self.update_test)
self.widgets["select_test"].on_change("options", self.update_test)
# Filter widget
self.widgets["test_filter"] = TextInput(
name="test_filter", title="Tests filter:"
)
self.widgets["test_filter"].js_on_change("value", CustomJS(
args=dict(options=self.tests, selector=self.widgets["select_test"]),
code=filter_callback_js
))
self.doc.add_root(self.widgets["test_filter"])
# Number of runs to display
self.widgets["select_n_runs"] = Select(
name="select_n_runs", title="Display :",
value=n_runs_display[1], options=n_runs_display
)
self.doc.add_root(self.widgets["select_n_runs"])
self.widgets["select_n_runs"].on_change("value", self.update_n_runs)
# Variable selector widget
self.widgets["select_var"] = Select(
name="select_var", title="Variable :",
value=self.vars[0], options=self.vars
)
self.doc.add_root(self.widgets["select_var"])
self.widgets["select_var"].on_change("value", self.update_var)
self.widgets["select_var"].on_change("options", self.update_var)
# Backend selector widget
self.widgets["select_backend"] = Select(
name="select_backend", title="Verificarlo backend :",
value=self.backends[0], options=self.backends
)
self.doc.add_root(self.widgets["select_backend"])
self.widgets["select_backend"].on_change("value", self.update_backend)
# Outliers filtering checkbox
self.widgets["outliers_filtering_compare"] = CheckboxGroup(
name="outliers_filtering_compare",
labels=["Filter outliers"], active =[]
)
self.doc.add_root(self.widgets["outliers_filtering_compare"])
self.widgets["outliers_filtering_compare"]\
.on_change("active", self.update_outliers_filtering)
# Communication methods
# (to send/receive messages to/from master)
# Callback to change view of Inspect runs when data is selected
def inspect_run_callback(self, new, source_name, x_name):
# In case we just unselected everything, then do nothing
if new == []:
return
index = new[-1]
run_name = self.sources[source_name].data[x_name][index]
self.master.go_to_inspect(run_name)
    # Wrappers for each plot (since new is the index of the clicked element,
    # it depends on the plot, as some outliers might have been filtered out)
# There doesn't seem to be an easy way to add custom parameters to a
# Bokeh callback, so using wrappers seems to be the best solution for now
def inspect_run_callback_boxplot(self, attr, old, new):
self.inspect_run_callback(new, "boxplot_source", "x")
def inspect_run_callback_sigma(self, attr, old, new):
self.inspect_run_callback(new, "sigma_source", "sigma_x")
def inspect_run_callback_s2(self, attr, old, new):
self.inspect_run_callback(new, "s2_source", "s2_x")
def inspect_run_callback_s10(self, attr, old, new):
self.inspect_run_callback(new, "s10_source", "s10_x")
# Constructor
def __init__(self, master, doc, data, metadata):
self.master = master
self.doc = doc
self.data = data
self.metadata = metadata
self.sources = {
"boxplot_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}),
"s10_source" :ColumnDataSource(data={}),
"s2_source": ColumnDataSource(data={})
}
self.plots = {}
self.widgets = {}
# Setup Bokeh objects
self.setup_plots()
self.setup_widgets()
# At this point, everything should have been initialized, so we can
# show the plots for the first time
self.update_plots()

166
ci/vfc_ci_report/helper.py Normal file

@@ -0,0 +1,166 @@
# General helper functions for both compare_runs and compare_variables
import calendar
import time
from itertools import compress
import numpy as np
# Magic numbers
max_ticks = 15
max_zscore = 3
################################################################################
# From a timestamp, return the associated metadata as a Pandas series
def get_metadata(metadata, timestamp):
return metadata.loc[timestamp]
# Convert a metadata Pandas series to a JS readable dict
def metadata_to_dict(metadata):
dict = metadata.to_dict()
# JS doesn't accept True for booleans, and Python doesn't accept true
# (because of the caps) => using an integer is a portable solution
dict["is_git_commit"] = 1 if dict["is_git_commit"] else 0
dict["date"] = time.ctime(metadata.name)
return dict
# Return a string that indicates the elapsed time since the run, used as the
# x-axis tick in "Compare runs" or when selecting run in "Inspect run"
def get_run_name(timestamp, hash):
gmt = time.gmtime()
now = calendar.timegm(gmt)
diff = now - timestamp
# Special case : < 1 minute (return string directly)
if diff < 60:
str = "Less than a minute ago"
if hash != "":
str = str + " (%s)" % hash
if str == get_run_name.previous:
get_run_name.counter = get_run_name.counter + 1
str = "%s (%s)" % (str, get_run_name.counter)
else:
get_run_name.counter = 0
get_run_name.previous = str
return str
# < 1 hour
if diff < 3600:
n = int(diff / 60)
str = "%s minute%s ago"
# < 1 day
elif diff < 86400:
n = int(diff / 3600)
str = "%s hour%s ago"
# < 1 week
elif diff < 604800:
n = int(diff / 86400)
str = "%s day%s ago"
# < 1 month
elif diff < 2592000:
n = int(diff / 604800)
str = "%s week%s ago"
# > 1 month
else:
        n = int(diff / 2592000)
str = "%s month%s ago"
plural = ""
if n != 1:
plural = "s"
str = str % (n, plural)
# We might want to add the git hash
if hash != "":
str = str + " (%s)" % hash
# Finally, check for duplicate with previously generated string
if str == get_run_name.previous:
# Increment the duplicate counter and add it to str
get_run_name.counter = get_run_name.counter + 1
str = "%s (%s)" % (str, get_run_name.counter)
else:
# No duplicate, reset both previously generated str and duplicate counter
get_run_name.counter = 0
get_run_name.previous = str
return str
# These external variables will store data about the last generated string to
# avoid duplicates (assuming the runs are sorted by time)
get_run_name.counter = 0
get_run_name.previous = ""
def reset_run_strings():
get_run_name.counter = 0
get_run_name.previous = ""
# Update the x-range of a plot from a list of factors
def reset_x_range(plot, x_range):
plot.x_range.factors = x_range
if len(x_range) < max_ticks:
plot.xaxis.major_tick_line_color = "#000000"
plot.xaxis.minor_tick_line_color = "#000000"
plot.xaxis.major_label_text_font_size = "8pt"
else:
plot.xaxis.major_tick_line_color = None
plot.xaxis.minor_tick_line_color = None
plot.xaxis.major_label_text_font_size = "0pt"
# Return an array of booleans indicating which elements must be kept
# (True means the element is not an outlier)
def detect_outliers(array, max_zscore=max_zscore):
    if len(array) <= 2:
        return [True] * len(array)
    median = np.median(array)
    std = np.std(array)
    if std == 0:
        return [True] * len(array)
distance = abs(array - median)
# Array of booleans with elements to be filtered
outliers_array = distance < max_zscore * std
return outliers_array
def remove_outliers(array, outliers):
return list(compress(array, outliers))
def remove_boxplot_outliers(dict, outliers, prefix):
    dict["%s_x" % prefix] = remove_outliers(dict["%s_x" % prefix], outliers)
dict["%s_x" % prefix] = remove_outliers(dict["%s_x" % prefix], outliers)
dict["%s_min" % prefix] = remove_outliers(dict["%s_min" % prefix], outliers)
dict["%s_quantile25" % prefix] = remove_outliers(dict["%s_quantile25" % prefix], outliers)
dict["%s_quantile50" % prefix] = remove_outliers(dict["%s_quantile50" % prefix], outliers)
dict["%s_quantile75" % prefix] = remove_outliers(dict["%s_quantile75" % prefix], outliers)
dict["%s_max" % prefix] = remove_outliers(dict["%s_max" % prefix], outliers)
dict["%s_mu" % prefix] = remove_outliers(dict["%s_mu" % prefix], outliers)
dict["nsamples"] = remove_outliers(dict["nsamples"], outliers)

588
ci/vfc_ci_report/inspect_runs.py Normal file

@@ -0,0 +1,588 @@
# Manage the view comparing the variables of a run
from math import pi
from functools import partial
import pandas as pd
import numpy as np
from bokeh.plotting import figure, curdoc
from bokeh.embed import components
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool,\
RadioButtonGroup, CheckboxGroup, CustomJS
import helper
import plot
################################################################################
class InspectRuns:
# Helper functions related to InspectRun
# Returns a dictionary mapping user-readable strings to all run timestamps
def gen_runs_selection(self):
runs_dict = {}
# Iterate over timestamp rows (runs) and fill dict
for row in self.metadata.iloc:
# The syntax used by pandas makes this part a bit tricky :
# row.name is the index of metadata (so it refers to the
# timestamp), whereas row["name"] is the column called "name"
# (which is the display string used for the run)
# runs_dict[run's name] = run's timestamp
runs_dict[row["name"]] = row.name
return runs_dict
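    # For reference, the dict built above might look like
    # {"2 days ago (abc1234)": 1619648462, "Less than a minute ago": 1619805662}
    # (hypothetical names and timestamps; the display strings are generated in
    # main.py via helper.get_run_name)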
def gen_boxplot_tooltips(self, prefix):
return [
("Name", "@%s_x" % prefix),
("Min", "@" + prefix + "_min{%0.18e}"),
("Max", "@" + prefix + "_max{%0.18e}"),
("1st quartile", "@" + prefix + "_quantile25{%0.18e}"),
("Median", "@" + prefix + "_quantile50{%0.18e}"),
("3rd quartile", "@" + prefix + "_quantile75{%0.18e}"),
("μ", "@" + prefix + "_mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
def gen_boxplot_tooltips_formatters(self, prefix):
return {
"@%s_min" % prefix : "printf",
"@%s_max" % prefix : "printf",
"@%s_quantile25" % prefix : "printf",
"@%s_quantile50" % prefix : "printf",
"@%s_quantile75" % prefix : "printf",
"@%s_mu" % prefix : "printf"
}
# Data processing helper
# (computes new distributions for sigma, s2, s10)
def data_processing(self, dataframe):
        # Compute the aggregated mu
dataframe["mu"] = np.vectorize(np.average)(dataframe["mu"], weights=dataframe["nsamples"])
# nsamples is the number of aggregated elements (as well as the number
# of samples for our new sigma and s distributions)
dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
dataframe["mu_x"] = dataframe.index
        # Make sure that strings don't exceed a certain length
dataframe["mu_x"] = dataframe["mu_x"].apply(
lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
)
# Get quantiles and mu for sigma, s10, s2
for prefix in ["sigma", "s10", "s2"]:
dataframe["%s_x" % prefix] = dataframe["mu_x"]
dataframe[prefix] = dataframe[prefix].apply(np.sort)
dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.25,))
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.50,))
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.75,))
dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
del dataframe[prefix]
return dataframe
# Plots update function
def update_plots(self):
groupby_display = self.widgets["groupby_radio"].labels[
self.widgets["groupby_radio"].active
]
groupby = self.factors_dict[groupby_display]
filterby_display = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby_display]
# Groupby and aggregate lines belonging to the same group in lists
groups = self.run_data[
self.run_data.index.isin(
[self.widgets["select_filter"].value],
level=filterby
)
].groupby(groupby)
groups = groups.agg({
"sigma": lambda x: x.tolist(),
"s10": lambda x: x.tolist(),
"s2": lambda x: x.tolist(),
"mu": lambda x: x.tolist(),
# Used for mu weighted average first, then will be replaced
"nsamples": lambda x: x.tolist()
})
# Compute the new distributions, ...
groups = self.data_processing(groups).to_dict("list")
# Update source
# Assign each ColumnDataSource, starting with the boxplots
for prefix in ["sigma", "s10", "s2"]:
dict = {
"%s_x" % prefix: groups["%s_x" % prefix],
"%s_min" % prefix: groups["%s_min" % prefix],
"%s_quantile25" % prefix: groups["%s_quantile25" % prefix],
"%s_quantile50" % prefix: groups["%s_quantile50" % prefix],
"%s_quantile75" % prefix: groups["%s_quantile75" % prefix],
"%s_max" % prefix: groups["%s_max" % prefix],
"%s_mu" % prefix: groups["%s_mu" % prefix],
"nsamples": groups["nsamples"]
}
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
# Boxplots will be filtered by max then min
top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
helper.remove_boxplot_outliers(dict, top_outliers, prefix)
bottom_outliers = helper.detect_outliers(dict["%s_min" % prefix])
helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
self.sources["%s_source" % prefix].data = dict
# Finish with the mu plot
dict = {
"mu_x": groups["mu_x"],
"mu": groups["mu"],
"nsamples": groups["nsamples"]
}
self.sources["mu_source"].data = dict
# Filter outliers if the box is checked
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
mu_outliers = helper.detect_outliers(groups["mu"])
groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
groups["mu_x"] = helper.remove_outliers(groups["mu_x"], mu_outliers)
# Update plots axis/titles
# Get display string of the last (unselected) factor
factors_dict = self.factors_dict.copy()
del factors_dict[groupby_display]
del factors_dict[filterby_display]
over_all = list(factors_dict.keys())[0]
# Update all display strings for plot title (remove caps, plural)
groupby_display = groupby_display.lower()
filterby_display = filterby_display.lower()[:-1]
over_all = over_all.lower()
self.plots["mu_inspect"].title.text = \
"Empirical average μ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all)
self.plots["sigma_inspect"].title.text = \
"Standard deviation σ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all)
self.plots["s10_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all)
self.plots["s2_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all)
# Update x_ranges
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
    # Widgets' callback functions
# Run selector callback
def update_run(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Update run selection (by using dict mapping)
self.current_run = self.runs_dict[new]
# Update run data
self.run_data = self.data[self.data["timestamp"] == self.current_run]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Group by" radio
def update_groupby(self, attrname, old, new):
# Update "Filter by" radio list
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"].labels = filterby_list
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# "Filter by" radio
def update_filterby(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
# Save old selected option
old_value = self.widgets["select_filter"].value
# Update filter selector options
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options:
self.widgets["select_filter"].value = options[0]
# The update_var callback will be triggered by the assignment
else:
# Trigger the callback manually (since the plots need to be updated
# anyway)
self.update_filter("", "", old_value)
# Filter selector callback
def update_filter(self, attrname, old, new):
self.update_plots()
# Filter outliers checkbox callback
def update_outliers_filtering(self, attrname, old, new):
# The status (checked/unchecked) of the checkbox is also verified inside
# self.update_plots(), so calling this function is enough
self.update_plots()
# Bokeh setup functions
# (for both variable and backend selection at once)
def setup_plots(self):
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Tooltips and formatters
dotplot_tooltips = [
("Name", "@mu_x"),
("μ", "@mu{%0.18e}"),
("Number of samples (tests)", "@nsamples")
]
dotplot_formatters = {
"@mu" : "printf"
}
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("sigma")
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s10")
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s2")
# Plots
# Mu plot
self.plots["mu_inspect"] = figure(
name="mu_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_dotplot(
self.plots["mu_inspect"], self.sources["mu_source"], "mu",
tooltips = dotplot_tooltips,
tooltips_formatters = dotplot_formatters
)
self.doc.add_root(self.plots["mu_inspect"])
# Sigma plot
self.plots["sigma_inspect"] = figure(
name="sigma_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode="scale_width"
)
plot.fill_boxplot(
self.plots["sigma_inspect"], self.sources["sigma_source"], prefix="sigma",
tooltips = sigma_boxplot_tooltips,
tooltips_formatters = sigma_boxplot_tooltips_formatters
)
self.doc.add_root(self.plots["sigma_inspect"])
# s plots
self.plots["s10_inspect"] = figure(
name="s10_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s10_inspect"], self.sources["s10_source"], prefix="s10",
tooltips = s10_boxplot_tooltips,
tooltips_formatters = s10_boxplot_tooltips_formatters
)
s10_tab_inspect = Panel(child=self.plots["s10_inspect"], title="Base 10")
self.plots["s2_inspect"] = figure(
name="s2_inspect",
title="",
plot_width=900, plot_height=400, x_range=[""],
tools=tools, sizing_mode='scale_width'
)
plot.fill_boxplot(
self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2",
tooltips = s2_boxplot_tooltips,
tooltips_formatters = s2_boxplot_tooltips_formatters
)
s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2")
s_tabs_inspect = Tabs(
name = "s_tabs_inspect",
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location = "below"
)
self.doc.add_root(s_tabs_inspect)
def setup_widgets(self):
# Generation of selectable items
# Dict contains all inspectable runs (maps display strings to timestamps)
# The dict structure allows to get the timestamp from the display string
# in O(1)
self.runs_dict = self.gen_runs_selection()
# Dict maps display strings to column names for the different factors
# (var, backend, test)
self.factors_dict = {
"Variables": "variable",
"Backends": "vfc_backend",
"Tests": "test"
}
# Run selection
# Contains all options strings
runs_display = list(self.runs_dict.keys())
# Will be used when updating plots (contains actual number)
self.current_run = self.runs_dict[runs_display[-1]]
# Contains the selected option string, used to update current_n_runs
current_run_display = runs_display[-1]
# This contains only entries matching the run
self.run_data = self.data[self.data["timestamp"] == self.current_run]
change_run_callback_js="updateRunMetadata(cb_obj.value);"
self.widgets["select_run"] = Select(
name="select_run", title="Run :",
value=current_run_display, options=runs_display
)
self.doc.add_root(self.widgets["select_run"])
self.widgets["select_run"].on_change("value", self.update_run)
self.widgets["select_run"].js_on_change("value", CustomJS(
code = change_run_callback_js,
args=(dict(
metadata=helper.metadata_to_dict(
helper.get_metadata(self.metadata, self.current_run)
)
))
))
# Factors selection
# "Group by" radio
self.widgets["groupby_radio"] = RadioButtonGroup(
name="groupby_radio",
labels=list(self.factors_dict.keys()), active=0
)
self.doc.add_root(self.widgets["groupby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["groupby_radio"].on_change(
"active",
self.update_groupby
)
# "Filter by" radio
# Get all possible factors, and remove the one selected in "Group by"
filterby_list = list(self.factors_dict.keys())
del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"] = RadioButtonGroup(
name="filterby_radio",
labels=filterby_list, active=0
)
self.doc.add_root(self.widgets["filterby_radio"])
# The functions are defined inside the template to avoid writing too
# much JS server side
self.widgets["filterby_radio"].on_change(
"active",
self.update_filterby
)
# Filter selector
filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active
]
filterby = self.factors_dict[filterby]
options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"] = Select(
# We need a different name to avoid collision in the template with
# the runs comparison's widget
name="select_filter", title="Select a filter :",
value=options[0], options=options
)
self.doc.add_root(self.widgets["select_filter"])
self.widgets["select_filter"]\
.on_change("value", self.update_filter)
# Toggle for outliers filtering
self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
name="outliers_filtering_inspect",
labels=["Filter outliers"], active = []
)
self.doc.add_root(self.widgets["outliers_filtering_inspect"])
self.widgets["outliers_filtering_inspect"]\
.on_change("active", self.update_outliers_filtering)
# Communication methods
# (to send/receive messages to/from master)
    # When called (from the master), switch the view to the given run
def switch_view(self, run_name):
self.widgets["select_run"].value = run_name
# Constructor
def __init__(self, master, doc, data, metadata):
self.master = master
self.doc = doc
self.data = data
self.metadata = metadata
self.sources = {
"mu_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}),
"s10_source" :ColumnDataSource(data={}),
"s2_source": ColumnDataSource(data={})
}
self.plots = {}
self.widgets = {}
# Setup Bokeh objects
self.setup_plots()
self.setup_widgets()
# Pass the initial metadata to the template (will be updated in CustomJS
# callbacks). This is required because metadata is not displayed in a
# Bokeh widget, so we can't update this with a server callback.
initial_run = helper.get_metadata(self.metadata, self.current_run)
self.doc.template_variables["initial_timestamp"] = self.current_run
# At this point, everything should have been initialized, so we can
# show the plots for the first time
self.update_plots()

217
ci/vfc_ci_report/main.py Normal file

@@ -0,0 +1,217 @@
# Look for and read all the run files in the current directory (ending with
# .vfcrun.hd5), and launch a Bokeh server for the visualization of this data.
import os
import sys
import time
import pandas as pd
from bokeh.plotting import curdoc
# Local imports from vfc_ci_server
import compare_runs
import inspect_runs
import helper
################################################################################
# Read vfcrun files, and aggregate them in one dataset
run_files = [ f for f in os.listdir(".") if f.endswith(".vfcrun.hd5") ]
if len(run_files) == 0:
print(
"Warning [vfc_ci]: Could not find any vfcrun files in the directory. " \
"This will result in server errors and prevent you from viewing the report."
)
# These are arrays of Pandas dataframes for now
metadata = []
data = []
for f in run_files:
metadata.append(pd.read_hdf(f, "metadata"))
data.append(pd.read_hdf(f, "data"))
metadata = pd.concat(metadata).sort_index()
data = pd.concat(data).sort_index()
# Generate the display strings for runs (runs ticks)
# By doing this in master, we ensure the homogeneity of display strings
# across all plots
metadata["name"] = metadata.index.to_series().map(
lambda x: helper.get_run_name(
x,
helper.get_metadata(metadata, x)["hash"]
)
)
helper.reset_run_strings()
metadata["date"] = metadata.index.to_series().map(
lambda x: time.ctime(x)
)
################################################################################
curdoc().title = "Verificarlo Report"
# Read server arguments
# (this is quite easy because Bokeh server is called through a wrapper, so
# we know exactly what the arguments might be)
git_repo_linked = False
commit_link = ""
has_logo = False
logo_url = ""
for i in range(1, len(sys.argv)):
# Look for the Git repository remote address
# (if a Git repo is specified, the webpage will contain hyperlinks to the
# repository and the different commits)
if sys.argv[i] == "git":
from urllib.parse import urlparse
method = sys.argv[i + 1]
address = sys.argv[i + 2]
url = ""
# Here, address is either the remote URL or the path to the local Git
# repo (depending on the method)
if method == "url":
# We should directly have a Git URL
url = address
elif method == "directory":
# Get the remote URL from the local repo
from git import Repo
repo = Repo(address)
url = repo.remotes.origin.url
else:
raise ValueError(
"Error [vfc_ci]: The specified method to get the Git " \
"repository is invalid. Are you calling Bokeh directly " \
"instead of using the Verificarlo wrapper ?"
)
# At this point, "url" should be set correctly, we can get the repo's
# URL and name, after making sure we're on a Git URL
parsed_url = urlparse(url)
path = parsed_url.path.split("/")
if len(path) < 3:
raise ValueError(
"Error [vfc_ci]: The found URL doesn't seem to be pointing " \
"to a Git repository (path is too short)"
)
repo_name = path[2]
curdoc().template_variables["repo_url"] = url
curdoc().template_variables["repo_name"] = repo_name
# We should have a "github.com" or a "*gitlab*" URL
if parsed_url.netloc == "github.com":
commit_link = "https://%s%s/commit/" \
% (parsed_url.netloc, parsed_url.path)
curdoc().template_variables["commit_link"] = commit_link
curdoc().template_variables["git_host"] = "GitHub"
# Used in Bokeh tooltips
commit_link = commit_link + "@hash"
# We assume we have a GitLab URL
else:
commit_link = "https://%s%s/-/commit/" \
% (parsed_url.netloc, parsed_url.path)
curdoc().template_variables["commit_link"] = commit_link
curdoc().template_variables["git_host"] = "GitLab"
# Used in Bokeh tooltips
commit_link = commit_link + "@hash"
git_repo_linked = True
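    # Example: with url = "https://github.com/verificarlo/verificarlo", path is
    # ['', 'verificarlo', 'verificarlo'], repo_name is "verificarlo", and the
    # tooltip link becomes
    # "https://github.com/verificarlo/verificarlo/commit/@hash"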
# Look for a logo URL
# If a logo URL is specified, it will be included in the report's header
if sys.argv[i] == "logo":
curdoc().template_variables["logo_url"] = sys.argv[i + 1]
has_logo = True
# After the loop, we know if a repo has been linked, if we have a logo, ...
curdoc().template_variables["git_repo_linked"] = git_repo_linked
curdoc().template_variables["has_logo"] = has_logo
################################################################################
# Setup report views
# Define a ViewsMaster class to allow two-way communication between views.
# This class-based approach gives each view a separate scope, and will be
# useful if we want to add new views at some point in the future
# (instead of having n views with n-1 references each).
class ViewsMaster:
# Communication functions
def go_to_inspect(self, run_name):
self.inspect.switch_view(run_name)
    # Constructor
def __init__(self, data, metadata, git_repo_linked, commit_link):
self.data = data
self.metadata = metadata
self.git_repo_linked = git_repo_linked
self.commit_link = commit_link
# Pass metadata to the template as a JSON string
curdoc().template_variables["metadata"] = self.metadata.to_json(orient="index")
# Runs comparison
self.compare = compare_runs.CompareRuns(
master = self,
doc = curdoc(),
data = data,
metadata = metadata,
)
# Runs inspection
self.inspect = inspect_runs.InspectRuns(
master = self,
doc = curdoc(),
data = data,
metadata = metadata,
)
views_master = ViewsMaster(
data = data,
metadata = metadata,
git_repo_linked = git_repo_linked,
commit_link = commit_link
)

151
ci/vfc_ci_report/plot.py Normal file
View File

@ -0,0 +1,151 @@
# General functions for filling plots with data in all of the report's views
from bokeh.plotting import figure
from bokeh.models import HoverTool, TapTool, CustomJS
from math import pi
def fill_dotplot(
plot, source, data_field,
tooltips=None, tooltips_formatters=None,
js_tap_callback=None, server_tap_callback=None,
lines=False,
lower_bound=False
):
# (Optional) Tooltip and tooltip formatters
if tooltips != None:
hover = HoverTool(tooltips = tooltips, mode="vline", names=["circle"])
if tooltips_formatters != None:
hover.formatters = tooltips_formatters
plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None:
tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap)
# (Optional) Add segment to represent a lower bound
if lower_bound:
lower_segment = plot.segment(
x0="%s_x" % data_field, y0=data_field,
x1="%s_x" % data_field, y1="%s_lower_bound" % data_field,
source=source, line_color="black"
)
# Draw dots (actually Bokeh circles)
circle = plot.circle(
name="circle",
x="%s_x" % data_field, y=data_field, source=source, size=12
)
# (Optional) Draw lines between dots
if lines:
line = plot.line(x="%s_x" % data_field, y=data_field, source=source)
# (Optional) Add server tap callback
if server_tap_callback != None:
circle.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
plot.yaxis[0].formatter.power_limit_high = 0
plot.yaxis[0].formatter.power_limit_low = 0
plot.yaxis[0].formatter.precision = 3
plot.xaxis[0].major_label_orientation = pi/8
def fill_boxplot(
plot, source,
prefix="",
tooltips=None, tooltips_formatters=None,
js_tap_callback=None, server_tap_callback=None,
):
# (Optional) Tooltip and tooltip formatters
if tooltips != None:
hover = HoverTool(tooltips = tooltips, mode="vline", names=["full_box"])
if tooltips_formatters != None:
hover.formatters = tooltips_formatters
plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None:
tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap)
# Draw boxes (the prefix argument modifies the fields of ColumnDataSource
# that are used)
if prefix != "":
prefix = "%s_" % prefix
# Stems
top_stem = plot.segment(
x0="%sx" % prefix, y0="%smax" % prefix,
x1="%sx" % prefix, y1="%squantile75" % prefix,
source=source, line_color="black"
)
bottom_stem = plot.segment(
x0="%sx" % prefix, y0="%smin" % prefix,
x1="%sx" % prefix, y1="%squantile25" % prefix,
source=source, line_color="black"
)
# Boxes
full_box = plot.vbar(
name="full_box",
x="%sx" % prefix, width=0.5,
top="%squantile75" % prefix, bottom="%squantile25" % prefix,
source=source, line_color="black"
)
bottom_box = plot.vbar(
x="%sx" % prefix, width=0.5,
top="%squantile50" % prefix, bottom="%squantile25" % prefix,
source=source, line_color="black"
)
# Mu dot
mu_dot = plot.dot(
x="%sx" % prefix, y="%smu" % prefix, size=30, source=source,
color="black"
)
# (Optional) Add server tap callback
if server_tap_callback != None:
top_stem.data_source.selected.on_change("indices", server_tap_callback)
bottom_stem.data_source.selected.on_change("indices", server_tap_callback)
full_box.data_source.selected.on_change("indices", server_tap_callback)
bottom_box.data_source.selected.on_change("indices", server_tap_callback)
mu_dot.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
plot.yaxis[0].formatter.power_limit_high = 0
plot.yaxis[0].formatter.power_limit_low = 0
plot.yaxis[0].formatter.precision = 3
plot.xaxis[0].major_label_orientation = pi/8
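As a quick illustration (not part of this commit), the sketch below shows how fill_dotplot might be driven from one of the report's views. The ColumnDataSource field names follow the "<data_field>_x" / "<data_field>" convention expected by the function, and the data values are made up:

# Illustrative sketch only: build a source whose fields match the
# "<data_field>_x" / "<data_field>" naming expected by fill_dotplot.
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data={
    "s_x": [0, 1, 2, 3],        # x positions of the dots
    "s": [2.1, 2.4, 1.9, 2.2],  # values plotted on the y axis
})

plot = figure(title="Significant digits (s)")
fill_dotplot(
    plot, source, "s",
    tooltips=[("Run", "@s_x"), ("s", "@s")],
    lines=True,
)

fill_boxplot is driven the same way, except that the source must provide the min / quantile25 / quantile50 / quantile75 / max / mu fields (optionally prefixed).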

480
ci/vfc_ci_report/templates/index.html Normal file
View File

@ -0,0 +1,480 @@
<!DOCTYPE html>
<html>
<head>
<title>Verificarlo Report</title>
<meta charset="utf8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<!-- This template uses Bulma for CSS: https://bulma.io/ -->
<!-- Doc: https://bulma.io/documentation/ -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bulma@0.9.2/css/bulma.min.css">
<style>
html, body{
background-color: #f5f5f5;
}
#navbar {
height: 67px;
}
#logo-link {
padding: 0;
padding-left: 16px;
}
#logo-img {
object-fit: cover;
margin-top: -18px;
margin-left: -8px;
max-height: 50px;
}
#compare-runs-container {
margin-top: 1em;
}
#inspect-runs-container {
margin-top: 1em;
display: none; /* This one is hidden by default */
}
.plot-card {
width: 900px;
}
#loader {
height: 100%;
}
#loading-logo {
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
max-width: 400px;
animation: pulse 1.5s linear infinite;
}
@keyframes pulse {
0% {
transform: scale(1) translate(-50%, -50%);
}
50% {
transform: scale(0.9) translate(-55%, -55%);
}
100% {
transform: scale(1) translate(-50%, -50%);
}
}
</style>
{% extends base %}
</head>
<body>
{% block contents %}
<!-- REPORT -->
<div id="report" style="display: none;">
<!-- HEADER -->
<nav class="navbar has-shadow" id="navbar"
role="navigation" aria-label="navbar-content">
<!-- BRAND (left part) -->
<div class="navbar-brand">
{% if has_logo %}
<a
class="navbar-item" id="logo-link" href="."
style="margin-top: 12px;"
>
<img id="logo-img" width="85" height="45"
src="{{ logo_url }}">
</a>
{% else %}
<a class="navbar-item" id="logo-link" href=".">
<img id="logo-img" width="85" height="45"
src="https://avatars1.githubusercontent.com/u/12033642">
</a>
{% endif %}
<a role="button" class="navbar-burger" id="navbar-burger"
aria-label="menu" aria-expanded="false"
data-target="navbarBasicExample">
<span aria-hidden="true"></span>
<span aria-hidden="true"></span>
<span aria-hidden="true"></span>
</a>
</div>
<!-- MENU (content) -->
<div id="navbar-content" class="navbar-menu">
<div id="buttons-container" class="navbar-start">
<a class="navbar-item is-active" id="compare-runs-button">
Compare runs
</a>
<a class="navbar-item" id="inspect-runs-button">
Inspect runs
</a>
</div>
<div class="navbar-end">
{% if git_repo_linked %}
<div class="navbar-item">
<div class="buttons">
<a class="button is-light" href="{{repo_url}}" target="_blank">
{{repo_name}} on {{git_host}}
</a>
</div>
</div>
{% endif %}
<div class="navbar-item">
<div class="buttons">
<a class="button is-light"
href="https://github.com/verificarlo" target="_blank">
Verificarlo on GitHub
</a>
</div>
</div>
</div>
</div>
</nav>
<!-- CONTENT : COMPARE RUNS -->
<main class="container" id="compare-runs-container">
<div class="columns">
<!-- SELECTORS -->
<div class="column">
<h4 class="title is-4">Selectors</h4>
<div id="compare-widgets">
{{ embed(roots.test_filter) }}
{{ embed(roots.select_test) }}
<br>
{{ embed(roots.select_var) }}
<br>
{{ embed(roots.select_backend) }}
<br>
{{ embed(roots.outliers_filtering_compare) }}
<br>
<br>
{{ embed(roots.select_n_runs) }}
<br>
<br>
<b>Tip:</b> You can click on any element of the plots
to inspect the corresponding run in detail.
</div>
</div>
<div class="is-divider-vertical"></div>
<!-- PLOTS -->
<div class="column is-9">
<h3 class="title is-3">Plots</h3>
<div class="container">
<div class="card plot-card">
{{ embed(roots.s_tabs) }}
</div>
<br>
<div class="card plot-card">
{{ embed(roots.sigma_plot) }}
</div>
<br>
<div class="card plot-card">
{{ embed(roots.boxplot) }}
</div>
</div>
</div>
</div>
</main>
<!-- CONTENT : INSPECT RUNS -->
<main class="container" id="inspect-runs-container">
<div class="columns">
<!-- SELECTORS -->
<div class="column">
<h4 class="title is-4">Selectors</h4>
{{ embed(roots.select_run) }}
<br>
Group by:
{{ embed(roots.groupby_radio) }}
<br>
Filter by:
{{ embed(roots.filterby_radio) }}
{{ embed(roots.select_filter) }}
<br>
{{ embed(roots.outliers_filtering_inspect) }}
<br>
<br>
<h4 class="title is-4">Run metadata</h4>
<b>Date:</b>
<div id="run-date" style="display: inline;">
</div>
<br>
<div id="is-git-commit">
<b>Hash:</b>
<div
id="run-hash" style="display: inline;">
</div>
<br>
<b>Author:</b>
<div id="run-author" style="display: inline;">
</div>
<br>
<b>Message:</b>
<div id="run-message" style="display: inline;">
</div>
{% if git_repo_linked %}
<br>
<br>
<a
id="git-commit-link"
href=""
target="_blank"
>
View this commit on {{git_host}}
</a>
{% endif %}
</div>
<div id="not-git-commit">
This run is not linked to a Git commit.
</div>
</div>
<!-- PLOTS -->
<div class="column is-9">
<h3 class="title is-3">Plots</h3>
<div class="card plot-card">
{{ embed(roots.s_tabs_inspect) }}
</div>
<br>
<div class="card plot-card">
{{ embed(roots.sigma_inspect) }}
</div>
<br>
<div class="card plot-card">
{{ embed(roots.mu_inspect) }}
</div>
</div>
</div>
</main>
</div>
<!--- LOADER -->
<div id="loader">
{% if has_logo %}
<img id="loading-logo" src="{{logo_url}}">
{% else %}
<img id="loading-logo" src="https://avatars1.githubusercontent.com/u/12033642">
{% endif %}
</div>
<script>
// Listen to clicks on the navbar burger (for the responsive header)
document.getElementById("navbar-burger")
.addEventListener("click", () => {
document.getElementById("navbar-burger")
.classList.toggle("is-active");
document.getElementById("navbar-content")
.classList.toggle("is-active");
})
// Helper function to navigate between views
function changeView(classPrefix) {
// Enable/disable the active class on buttons
let buttons = document.getElementById("buttons-container")
.childNodes;
let toggledButtonId = classPrefix + "-button";
for(let i=0; i<buttons.length; i++) {
if(toggledButtonId == buttons[i].id) {
buttons[i].classList.add("is-active");
}
else if(buttons[i].classList != undefined) {
buttons[i].classList.remove("is-active");
}
}
// Show/hide the containers
let containers = document.getElementsByTagName("MAIN");
let toggledContainerId = classPrefix + "-container";
for(let i=0; i<containers.length; i++) {
if(toggledContainerId == containers[i].id) {
containers[i].style.display = "block";
}
else {
containers[i].style.display = "none";
}
}
}
// Listen to clicks on "Compare runs" button
document.getElementById("compare-runs-button")
.addEventListener("click", () => {
// Nothing else to do for this button
changeView("compare-runs");
});
// Listen to clicks on "Inspect runs" button
// (dedicated function as this needs to be called in a CustomJS callback)
function goToInspectRuns() {
window.scrollTo(0, 0);
changeView("inspect-runs");
}
document.getElementById("inspect-runs-button")
.addEventListener("click", goToInspectRuns);
// Toggle the display properties of the loader/report
function removeLoader() {
document.getElementById("loader")
.style.display = "none";
document.getElementById("report")
.style.display = "";
}
// To detect the end of Bokeh initialization and remove the loader,
// we look at the number of children of a div containing widgets
let nChildren = document.getElementById('compare-widgets')
.getElementsByTagName('*').length;
function pollBokehLoading() {
let newNChildren = document.getElementById('compare-widgets')
.getElementsByTagName('*').length;
if(newNChildren != nChildren) {
removeLoader();
}
else {
setTimeout(pollBokehLoading, 100);
}
}
setTimeout(pollBokehLoading, 100);
// Update the run metadata (in inspect run mode)
function updateRunMetadata(runId) {
// Assume runId is the run's timestamp
let run = metadata[runId];
// If it is undefined, perform a search by name
// (by iterating metadata)
if(!run) {
for(let [key, value] of Object.entries(metadata)) {
if (!metadata.hasOwnProperty(key)) continue;
if(value.name == runId) {
run = value;
break;
}
}
}
document.getElementById("run-date").innerHTML = run.date;
if(run.is_git_commit) {
document.getElementById("is-git-commit").style.display = "";
document.getElementById("not-git-commit").style.display = "none";
document.getElementById("run-hash").innerHTML = run.hash;
document.getElementById("run-author").innerHTML = run.author;
document.getElementById("run-message").innerHTML = run.message;
{% if git_repo_linked %}
document.getElementById("git-commit-link")
.setAttribute("href", "{{commit_link}}" + run.hash);
{% endif %}
} else {
document.getElementById("is-git-commit").style.display = "none";
document.getElementById("not-git-commit").style.display = "";
document.getElementById("run-hash").innerHTML = "";
document.getElementById("run-author").innerHTML = "";
document.getElementById("run-message").innerHTML = "";
}
}
// Object containing metadata from all runs
const metadata = {{metadata}};
// Initial run using the template arg
updateRunMetadata({{initial_timestamp}});
</script>
{% endblock %}
</body>
</html>
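For reference, the metadata object consumed by updateRunMetadata above is the JSON string that ViewsMaster exports with pandas' to_json(orient="index"). Its exact columns are produced elsewhere (in ci/test.py, not shown in this excerpt), but judging from the fields dereferenced in the template, one run entry plausibly looks like the following Python sketch (all values are illustrative):

# Hypothetical shape of one metadata entry, keyed by the run's timestamp:
metadata = {
    "1619735000": {
        "name": "run_1619735000",
        "date": "29/04/2021 23:41",
        "is_git_commit": True,
        "hash": "1d1da00",
        "author": "Aurélien Delval",
        "message": "Integration of vfc_ci",
    }
}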

View File

@ -0,0 +1,7 @@
## Verificarlo CI : `{{dev_branch}}`
You are on the `{{ci_branch}}` branch, which is automatically updated with the
[Verificarlo](https://github.com/verificarlo/verificarlo) test results from
`{{dev_branch}}` (in the `vfcruns` directory).
You can start a Verificarlo CI server at any time using the run files of this branch.

View File

@ -0,0 +1,42 @@
# This workflow will be executed when {{dev_branch}} is updated:
# it will run the configured tests and upload the results to {{ci_branch}}.
image: verificarlo/verificarlo
stages:
- run_verificarlo_tests
run_verificarlo_tests:
stage: run_verificarlo_tests
before_script:
- git remote set-url origin https://{{username}}:${CI_PUSH_TOKEN}@{{remote_url}}.git
- git config --global user.email "{{email}}"
- git config --global user.name "{{username}}"
script:
# We will probably drop these installations when integrating CI into
# Verificarlo
- pip install numpy scipy pandas bokeh jinja2 tables GitPython
- apt update
- apt install -y wget
- wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages
- ./vfc_ci test -g -r
- git_hash=$(git rev-parse --short "$CI_COMMIT_SHA")
- git fetch --all
- git checkout -b {{ci_branch}} origin/{{ci_branch}}
- mkdir -p vfcruns
- mv *.vfcrun.hd5 vfcruns
- git add vfcruns/*
- git commit -m "[auto] New test results for commit ${git_hash}"
- git push
rules:
- if: '$CI_COMMIT_BRANCH == "{{dev_branch}}"'
artifacts:
paths:
- "*.vfcraw.hd5"

View File

@ -0,0 +1,57 @@
# This workflow will be executed when {{dev_branch}} is updated:
# it will run the configured tests and upload the results to {{ci_branch}}.
name: "Verificarlo CI ({{dev_branch}})"
on:
# Triggers the workflow when {{dev_branch}} is updated
push:
branches: [ {{dev_branch}} ]
workflow_dispatch:
jobs:
run_verificarlo_tests:
runs-on: ubuntu-latest
container: verificarlo/verificarlo
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
# We will probably drop these installations when integrating CI into
# Verificarlo
- name: Install Python requirements
run: |
pip install numpy scipy pandas bokeh jinja2 tables GitPython
apt update
apt install -y wget
wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages
- name: Run tests
# We assume the script is included in the repo for now
# (we'll probably want to remove "./" if the script ends up being integrated
# in Verificarlo and becomes available system-wide)
run: ./vfc_ci test -g -r
- name: Commit test results
run: |
git_hash=$(git rev-parse --short "$GITHUB_SHA")
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout {{ci_branch}}
mkdir -p vfcruns
mv *.vfcrun.hd5 vfcruns
git add vfcruns/*
git commit -m "[auto] New test results for commit ${git_hash}"
git push
- name: Upload raw results as artifacts
uses: actions/upload-artifact@v2
with:
{% raw %}name: ${{github.sha}}.vfcraw{% endraw %}
path: ./*.vfcraw.hd5

248
include/vfc_hashmap.h Normal file
View File

@ -0,0 +1,248 @@
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2012 Couchbase, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#define HASH_MULTIPLIER 31
static const unsigned int hashmap_prime_1 = 73;
static const unsigned int hashmap_prime_2 = 5009;
#ifndef __VFC_HASHMAP_HEADER__
#define __VFC_HASHMAP_HEADER__
struct vfc_hashmap_st {
size_t nbits;
size_t mask;
size_t capacity;
size_t *items;
size_t nitems;
size_t n_deleted_items;
};
typedef struct vfc_hashmap_st *vfc_hashmap_t;
// allocate and initialize the map
vfc_hashmap_t vfc_hashmap_create();
// get the value at an index of a map
size_t get_value_at(size_t *items, size_t i);
// get the key at an index of a map
size_t get_key_at(size_t *items, size_t i);
// set the value at an index of a map
void set_value_at(size_t *items, size_t value, size_t i);
// set the key at an index of a map
void set_key_at(size_t *items, size_t key, size_t i);
// free the map
void vfc_hashmap_destroy(vfc_hashmap_t map);
// insert an element in the map
void vfc_hashmap_insert(vfc_hashmap_t map, size_t key, void *item);
// remove an element of the map
void vfc_hashmap_remove(vfc_hashmap_t map, size_t key);
// test if an element is in the map
char vfc_hashmap_have(vfc_hashmap_t map, size_t key);
// get an element of the map
void *vfc_hashmap_get(vfc_hashmap_t map, size_t key);
// get the number of elements in the map
size_t vfc_hashmap_num_items(vfc_hashmap_t map);
// Hash function
size_t vfc_hashmap_str_function(const char *id);
#endif
/***************** Verificarlo hashmap FUNCTIONS ********************
* The following set of functions is used in backends and wrappers
* to store and quickly access internal data.
*******************************************************************/
// free the map
void vfc_hashmap_destroy(vfc_hashmap_t map) {
if (map) {
free(map->items);
}
free(map);
}
// allocate and initialize the map
vfc_hashmap_t vfc_hashmap_create() {
vfc_hashmap_t map = (vfc_hashmap_t) calloc(1, sizeof(struct vfc_hashmap_st));
if (map == NULL) {
return NULL;
}
map->nbits = 3;
map->capacity = (size_t)(1 << map->nbits);
map->mask = map->capacity - 1;
// an item is now a value and a key
map->items = (size_t *) calloc(map->capacity, 2 * sizeof(size_t));
if (map->items == NULL) {
vfc_hashmap_destroy(map);
return NULL;
}
map->nitems = 0;
map->n_deleted_items = 0;
return map;
}
size_t get_value_at(size_t *items, size_t i) { return items[i * 2]; }
size_t get_key_at(size_t *items, size_t i) { return items[(i * 2) + 1]; }
void set_value_at(size_t *items, size_t value, size_t i) {
items[i * 2] = value;
}
void set_key_at(size_t *items, size_t key, size_t i) {
items[(i * 2) + 1] = key;
}
// add a member in the table
static int hashmap_add_member(vfc_hashmap_t map, size_t key, void *item) {
size_t value = (size_t)item;
size_t ii;
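/* Values 0 and 1 are reserved sentinels: 0 marks an empty slot and 1 a
 * deleted one (see vfc_hashmap_remove), so they cannot be stored directly. */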
if (value == 0 || value == 1) {
return -1;
}
ii = map->mask & (hashmap_prime_1 * key);
while (get_value_at(map->items, ii) != 0 &&
get_value_at(map->items, ii) != 1) {
if (get_value_at(map->items, ii) == value) {
return 0;
} else {
/* search free slot */
ii = map->mask & (ii + hashmap_prime_2);
}
}
map->nitems++;
if (get_value_at(map->items, ii) == 1) {
map->n_deleted_items--;
}
set_value_at(map->items, value, ii);
set_key_at(map->items, key, ii);
return 1;
}
// rehash the table if necessary
static void maybe_rehash_map(vfc_hashmap_t map) {
size_t *old_items;
size_t old_capacity, ii;
if (map->nitems + map->n_deleted_items >= (double)map->capacity * 0.85) {
old_items = map->items;
old_capacity = map->capacity;
map->nbits++;
map->capacity = (size_t)(1 << map->nbits);
map->mask = map->capacity - 1;
map->items = (size_t *) calloc(map->capacity, 2 * sizeof(size_t));
map->nitems = 0;
map->n_deleted_items = 0;
for (ii = 0; ii < old_capacity; ii++) {
hashmap_add_member(map, get_key_at(old_items, ii),
(void *)get_value_at(old_items, ii));
}
free(old_items);
}
}
// insert an element in the map
void vfc_hashmap_insert(vfc_hashmap_t map, size_t key, void *item) {
hashmap_add_member(map, key, item);
maybe_rehash_map(map);
}
// remove an element of the map
void vfc_hashmap_remove(vfc_hashmap_t map, size_t key) {
size_t ii = map->mask & (hashmap_prime_1 * key);
while (get_value_at(map->items, ii) != 0) {
if (get_key_at(map->items, ii) == key) {
set_value_at(map->items, 1, ii);
map->nitems--;
map->n_deleted_items++;
break;
} else {
ii = map->mask & (ii + hashmap_prime_2);
}
}
}
// test if an element is in the map
char vfc_hashmap_have(vfc_hashmap_t map, size_t key) {
size_t ii = map->mask & (hashmap_prime_1 * key);
while (get_value_at(map->items, ii) != 0) {
if (get_key_at(map->items, ii) == key) {
return 1;
} else {
ii = map->mask & (ii + hashmap_prime_2);
}
}
return 0;
}
// get an element of the map
void *vfc_hashmap_get(vfc_hashmap_t map, size_t key) {
size_t ii = map->mask & (hashmap_prime_1 * key);
while (get_value_at(map->items, ii) != 0) {
if (get_key_at(map->items, ii) == key) {
return (void *)get_value_at(map->items, ii);
} else {
ii = map->mask & (ii + hashmap_prime_2);
}
}
return NULL;
}
// get the number of elements in the map
size_t vfc_hashmap_num_items(vfc_hashmap_t map) { return map->nitems; }
// Hash function for strings
size_t vfc_hashmap_str_function(const char *id) {
unsigned const char *us;
us = (unsigned const char *)id;
size_t index = 0;
while (*us != '\0') {
index = index * HASH_MULTIPLIER + *us;
us++;
}
return index;
}
// Free the values stored in the hashmap (the map structure itself is freed by
// vfc_hashmap_destroy)
void vfc_hashmap_free(vfc_hashmap_t map) {
  for (size_t ii = 0; ii < map->capacity; ii++)
    if (get_value_at(map->items, ii) != 0 && get_value_at(map->items, ii) != 1)
      free((void *)get_value_at(map->items, ii));
}
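To make the probing scheme concrete, here is a small Python model of the insert/lookup logic above (an illustration only: it omits rehashing, deletion and the bookkeeping counters). Slot value 0 marks an empty slot and 1 a deleted one, and collisions are resolved by stepping the index by hashmap_prime_2 modulo the power-of-two capacity:

# Toy model of vfc_hashmap's open addressing (illustrative, not part of
# the commit). The real code also refuses to store the reserved values
# 0 and 1, rehashes at 85% load, and supports deletion.
HASHMAP_PRIME_1 = 73
HASHMAP_PRIME_2 = 5009

class ToyHashmap:
    def __init__(self, nbits=3):
        self.capacity = 1 << nbits
        self.mask = self.capacity - 1
        self.slots = [(0, 0)] * self.capacity  # (value, key); value 0 = empty

    def insert(self, key, value):
        ii = self.mask & (HASHMAP_PRIME_1 * key)
        while self.slots[ii][0] not in (0, 1):   # skip occupied slots
            if self.slots[ii][0] == value:       # value already stored
                return
            ii = self.mask & (ii + HASHMAP_PRIME_2)
        self.slots[ii] = (value, key)

    def get(self, key):
        ii = self.mask & (HASHMAP_PRIME_1 * key)
        while self.slots[ii][0] != 0:            # stop at the first empty slot
            if self.slots[ii][1] == key:
                return self.slots[ii][0]
            ii = self.mask & (ii + HASHMAP_PRIME_2)
        return None

Since the probe step is odd and the capacity is a power of two, successive probes visit every slot before cycling.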

254
include/vfc_probe.h Normal file
View File

@ -0,0 +1,254 @@
/*
* This file defines "vfc_probes", a hashtable-based structure which can be used
 * to place "probes" in code and store the values of test variables.
 * These test results can then be exported to a CSV file and used to generate a
* Verificarlo test report.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "vfc_hashmap.h"
#define VAR_NAME(var) #var // Simply returns the name of var as a string
/*
* A probe containing a double value as well as its key, which is needed when
* dumping the probes
*/
struct vfc_probe_node {
char * key;
double value;
};
typedef struct vfc_probe_node vfc_probe_node;
/*
* The probes structure. It simply acts as a wrapper for a Verificarlo hashmap.
*/
struct vfc_probes {
vfc_hashmap_t map;
};
typedef struct vfc_probes vfc_probes;
/*
* Initialize an empty vfc_probes instance
*/
vfc_probes vfc_init_probes() {
vfc_probes probes;
probes.map = vfc_hashmap_create();
return probes;
}
/*
* Free all probes
*/
void vfc_free_probes(vfc_probes * probes) {
// Before freeing the map, iterate manually over all items to free the keys
vfc_probe_node * probe = NULL;
for(int i = 0; i < probes->map->capacity; i++) {
probe = (vfc_probe_node*) get_value_at(probes->map->items, i);
if(probe != NULL) {
if(probe->key != NULL) {
free(probe->key);
}
}
}
vfc_hashmap_free(probes->map);
vfc_hashmap_destroy(probes->map);
}
/*
* Helper function to generate the key from test and variable name
*/
char * gen_probe_key(char * testName, char * varName) {
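    // +2: one byte for the ',' separator and one for the trailing '\0'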
char * key = (char *) malloc(strlen(testName) + strlen(varName) + 2);
strcpy(key, testName);
strcat(key, ",");
strcat(key, varName);
return key;
}
/*
* Helper function to detect forbidden character ',' in the keys
*/
void validate_probe_key(char * str) {
unsigned int len = strlen(str);
for(unsigned int i=0; i<len; i++) {
if(str[i] == ',') {
fprintf(
    stderr,
    "Error [verificarlo]: One of your probes has a ',' in its test "
    "or variable name (\"%s\"), which is forbidden\n",
    str
);
exit(1);
}
}
}
/*
 * Add a new probe. If an issue with the key is detected (a forbidden character
 * or a duplicate key), an error is printed and the program exits.
*/
int vfc_put_probe(
vfc_probes * probes,
char * testName, char * varName,
double val
) {
if(probes == NULL) {
return 1;
}
// Make sure testName and varName don't contain any ',', which would
// interfere with the key/CSV encoding
validate_probe_key(testName);
validate_probe_key(varName);
// Get the key, which is : testName + "," + varName
char * key = gen_probe_key(testName, varName);
// Look for a duplicate key
vfc_probe_node * oldProbe = (vfc_probe_node*) vfc_hashmap_get(
probes->map, vfc_hashmap_str_function(key)
);
if(oldProbe != NULL) {
if(strcmp(key, oldProbe->key) == 0) {
fprintf(
    stderr,
    "Error [verificarlo]: you have declared a duplicate probe "
    "(\"%s\"). Please make sure to use different names.\n",
    key
);
exit(1);
}
}
// Insert the element in the hashmap
vfc_probe_node * newProbe = (vfc_probe_node*) malloc(sizeof(vfc_probe_node));
newProbe->key = key;
newProbe->value = val;
vfc_hashmap_insert(
probes->map, vfc_hashmap_str_function(key), newProbe
);
return 0;
}
/*
* Remove (free) an element from the hash table
*/
int vfc_remove_probe(vfc_probes * probes, char * testName, char * varName) {
if(probes == NULL) {
return 1;
}
// Get the key, which is : testName + "," + varName
char * key = gen_probe_key(testName, varName);
vfc_hashmap_remove(probes->map, vfc_hashmap_str_function(key));
return 0;
}
/*
* Return the number of probes stored in the hashmap
*/
unsigned int vfc_num_probes(vfc_probes * probes) {
return vfc_hashmap_num_items(probes->map);
}
/*
* Dump probes in a .csv file (the double values are converted to hex), then
* free it.
*/
int vfc_dump_probes(vfc_probes * probes) {
if(probes == NULL) {
return 1;
}
// Get export path from the VFC_PROBES_OUTPUT env variable
char* exportPath = getenv("VFC_PROBES_OUTPUT");
if(!exportPath) {
printf(
    "Warning [verificarlo]: VFC_PROBES_OUTPUT is not set, probes will "
    "not be dumped\n"
);
vfc_free_probes(probes);
return 0;
}
FILE * fp = fopen(exportPath, "w");
if(fp == NULL) {
fprintf(
    stderr,
    "Error [verificarlo]: could not open the CSV file to save your "
    "probes (\"%s\")\n",
    exportPath
);
exit(1);
}
// First line gives the column names
fprintf(fp, "test,variable,value\n");
// Iterate over all table elements
vfc_probe_node * probe = NULL;
for(int i = 0; i < probes->map->capacity; i++) {
probe = (vfc_probe_node*) get_value_at(probes->map->items, i);
if(probe != NULL) {
fprintf(
fp, "%s,%a\n",
probe->key,
probe->value
);
}
}
fclose(fp);
vfc_free_probes(probes);
return 0;
}
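Because the values are dumped with printf's "%a" (hexadecimal floating point), they can be read back without any loss of precision. A minimal Python sketch, assuming a file produced by the dump above (the "probes.csv" path is hypothetical and would be whatever VFC_PROBES_OUTPUT was set to):

# Minimal sketch: read back a probes CSV written by vfc_dump_probes.
# float.fromhex() inverts printf's "%a" exactly, so no precision is lost.
import csv

with open("probes.csv") as f:      # the path set in VFC_PROBES_OUTPUT
    for row in csv.DictReader(f):  # columns: test,variable,value
        print(row["test"], row["variable"], float.fromhex(row["value"]))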

196
tests/vfc_test_h5.cpp Normal file
View File

@ -0,0 +1,196 @@
// This file is almost the same as test_h5.cpp, with the difference that it
// dumps Verificarlo probes for vfc_ci integration, and that it reads the list
// of cycles to execute from a text file, instead of accepting a start and an
// end cycle (which makes it easier to select the exact cycles we are
// interested in with vfc_ci).
#include <hdf5/serial/hdf5.h>
#include <hdf5/serial/H5Cpp.h>
#include <vector>
#include <fstream>
#include <sstream>
#include <cmath>
#include "SM_MaponiA3.hpp"
#include "SM_Standard.hpp"
#include "SM_Helpers.hpp"
#include "vfc_probe.h"
using namespace H5;
// #define DEBUG
const H5std_string FILE_NAME( "datasets/dataset.hdf5" );
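// Maximum elementwise residual w.r.t. the identity: max_{i,j} |A_ij - delta_ij|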
double residual_max(double * A, unsigned int Dim) {
double max = 0.0;
for (unsigned int i = 0; i < Dim; i++) {
for (unsigned int j = 0; j < Dim; j++) {
double delta = (A[i * Dim + j] - (i == j));
delta = std::fabs(delta);
if (delta > max) max = delta;
}
}
return max;
}
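// Squared Frobenius residual: ||A - I||_F^2 = sum_{i,j} (A_ij - delta_ij)^2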
double residual2(double * A, unsigned int Dim) {
double res = 0.0;
for (unsigned int i = 0; i < Dim; i++) {
for (unsigned int j = 0; j < Dim; j++) {
double delta = (A[i * Dim + j] - (i == j));
res += delta*delta;
}
}
return res;
}
void read_int(H5File file, std::string key, unsigned int * data) {
DataSet ds = file.openDataSet(key);
ds.read(data, PredType::STD_U32LE);
ds.close();
}
void read_double(H5File file, std::string key, double * data) {
DataSet ds = file.openDataSet(key);
ds.read(data, PredType::IEEE_F64LE);
ds.close();
}
/* Return a vector containing all cycles to execute by reading a data file */
std::vector<int> get_cycles_list(std::string path) {
std::ifstream file_stream(path);
std::stringstream string_stream;
string_stream << file_stream.rdbuf();
std::string cycle_str;
std::vector<int> cycles_list = {};
while(string_stream >> cycle_str) {
cycles_list.push_back(std::stoi(cycle_str));
}
return cycles_list;
}
int test_cycle(H5File file, int cycle, std::string version, vfc_probes * probes) {
/* Read the data */
std::string group = "cycle_" + std::to_string(cycle);
try{
file.openGroup(group);
} catch(H5::Exception& e){
std::cerr << "group " << group << "not found" << std::endl;
return 0;
}
unsigned int dim, nupdates, col, i, j;
read_int(file, group + "/slater_matrix_dim", &dim);
read_int(file, group + "/nupdates", &nupdates);
double * slater_matrix = new double[dim*dim];
read_double(file, group + "/slater_matrix", slater_matrix);
double * slater_inverse = new double[dim*dim];
read_double(file, group + "/slater_inverse", slater_inverse);
//slater_inverse = transpose(slater_inverse, dim);
unsigned int * col_update_index = new unsigned int[nupdates];
read_int(file, group + "/col_update_index", col_update_index);
double * updates = new double[nupdates*dim];
read_double(file, group + "/updates", updates);
double * u = new double[nupdates*dim];
/* Test */
#ifdef DEBUG
showMatrix(slater_matrix, dim, "OLD Slater");
#endif
#ifdef DEBUG
showMatrix(slater_inverse, dim, "OLD Inverse");
#endif
for (j = 0; j < nupdates; j++) {
for (i = 0; i < dim; i++) {
col = col_update_index[j];
u[i + j*dim] = updates[i + j*dim] - slater_matrix[i*dim + (col - 1)];
slater_matrix[i*dim + (col - 1)] = updates[i + j*dim];
}
}
if (version == "maponia3") {
MaponiA3(slater_inverse, dim, nupdates, u, col_update_index);
} else if (version == "sm1") {
SM1(slater_inverse, dim, nupdates, u, col_update_index);
} else if (version == "sm2") {
SM2(slater_inverse, dim, nupdates, u, col_update_index);
} else if (version == "sm3") {
SM3(slater_inverse, dim, nupdates, u, col_update_index);
} else {
std::cerr << "Unknown version " << version << std::endl;
exit(1);
}
#ifdef DEBUG
showMatrix(slater_matrix, dim, "NEW Slater");
#endif
#ifdef DEBUG
showMatrix(slater_inverse, dim, "NEW Inverse");
#endif
double * res = new double[dim*dim] {0};
matMul(slater_matrix, slater_inverse, res, dim);
bool ok = is_identity(res, dim, 1e-3);
double res_max = residual_max(res, dim);
double res2 = residual2(res, dim);
#ifdef DEBUG
showMatrix(res, dim, "Result");
#endif
vfc_put_probe(probes, &(group)[0], &("res_max_" + version)[0], res_max);
vfc_put_probe(probes, &(group)[0], &("res2_" + version)[0], res2);
delete [] res;
delete [] updates;
delete [] u;
delete [] col_update_index;
delete [] slater_matrix;
delete [] slater_inverse;
return ok;
}
int main(int argc, char **argv) {
if (argc != 3) {
std::cerr << "Execute from within '/'" << std::endl;
std::cerr << "usage: test_h5 <version> <start cycle> <stop cycle>" << std::endl;
return 1;
}
std::string version(argv[1]);
std::vector<int> cycles_list = get_cycles_list(argv[2]);
H5File file(FILE_NAME, H5F_ACC_RDONLY);
vfc_probes probes = vfc_init_probes();
bool all_ok = true;
for (size_t i = 0; i < cycles_list.size(); i++) {
    bool ok = test_cycle(file, cycles_list[i], version, &probes);
    if (ok) {
        std::cout << "ok -- cycle " << std::to_string(cycles_list[i])
                  << std::endl;
    }
    else {
        std::cerr << "failed -- cycle " << std::to_string(cycles_list[i])
                  << std::endl;
    }
    all_ok = all_ok && ok;
}
vfc_dump_probes(&probes);
return all_ok ? 0 : 1;
}

205
vfc_ci Executable file
View File

@ -0,0 +1,205 @@
#!/usr/bin/env python3
# This is the entry point of the Verificarlo CI command line interface, which is
# based on argparse and this article:
# https://mike.depalatis.net/blog/simplifying-argparse.html
# From here, 3 subcommands can be called:
# - setup: create a vfc_ci branch and workflow on the current Git repo
# - test: run and export test results according to the vfc_tests_config.json
# - serve: launch a Bokeh server to visualize run results
import argparse
################################################################################
# Parameters validation helpers
def is_port(string):
value = int(string)
if value < 0 or value > 65535:
raise argparse.ArgumentTypeError("Value has to be between 0 and 65535")
return value
def is_directory(string):
import os
isdir = os.path.isdir(string)
if not isdir:
raise argparse.ArgumentTypeError("Directory does not exist")
return string
################################################################################
# Subcommand decorator
cli = argparse.ArgumentParser(
description="Define, run, automatize, and visualize custom Verificarlo tests."
)
subparsers = cli.add_subparsers(dest="subcommand")
def subcommand(description="", args=[], parent=subparsers):
def decorator(func):
parser = parent.add_parser(func.__name__, description=description)
for arg in args:
parser.add_argument(*arg[0], **arg[1])
parser.set_defaults(func=func)
        return func
return decorator
def argument(*name_or_flags, **kwargs):
return ([*name_or_flags], kwargs)
################################################################################
# "setup" subcommand
@subcommand(
description="Create an automated workflow to execute Verificarlo tests.",
args = [
argument(
"git_host",
help="""
specify where your repository is hosted
""",
choices=["github", "gitlab"]
)
]
)
def setup(args):
import ci.setup
ci.setup.setup(args.git_host)
# "test" subcommand
@subcommand(
description="Execute predefined Verificarlo tests and save their results.",
args = [
argument(
"-g", "--is-git-commit",
help="""
When specified, the last Git commit of the local repository (working
directory) will be fetched and associated with the run.
""",
action="store_true"
),
argument(
"-r", "--export-raw-results",
help="""
Specify if an additional HDF5 file containing the raw results must be
exported.
""",
action="store_true"
),
argument(
"-d", "--dry-run",
help="""
Perform a dry run by not saving the test results.
""",
action="store_true"
)
]
)
def test(args):
import ci.test
ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run)
# "serve" subcommand
@subcommand(
description="""
Start a server to visualize Verificarlo test results.
""",
args = [
argument(
"-s", "--show",
help="""
Specify if the report must be opened in the browser at server
startup.
""",
action="store_true"
),
argument(
"-gd", "--git-directory",
help="""
Path to a local Git repository. The report will be linked to the
remote URL (GitHub and GitLab are supported).
""",
type=is_directory
),
argument(
"-gu", "--git-url",
help="""
GitHub or GitLab repository URL. The report will be linked to this
URL.
""",
type=str
),
argument(
"-p", "--port",
help="""
The port on which the server will run. Defaults to 8080.
""",
type=is_port,
default=8080
),
argument(
"-a", "--allow-origin",
help="""
The origin (URL) from which the report will be accessible.
Port number must not be specified. Defaults to * (allow everything).
""",
type=str,
default="*"
),
argument(
"-l", "--logo",
help="""
Specify the URL of an image to be displayed in the report header.
""",
type=str
)
]
)
def serve(args):
# git_directory and git_url are supposed to be exclusive
if args.git_directory != None and args.git_url != None:
raise argparse.ArgumentTypeError(
"\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "\
"mutually exclusive. Please make sure to use at most one of them."
)
import ci.serve
ci.serve.serve(
args.show,
args.git_directory,
args.git_url,
args.port,
args.allow_origin,
args.logo
)
###############################################################################
# Main command group and entry point
if __name__ == "__main__":
args = cli.parse_args()
if args.subcommand is None:
cli.print_help()
else:
args.func(args)

2
vfc_ci_cycles.txt Normal file
View File

@ -0,0 +1,2 @@
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
327 655 983 1311 1639 1967 2295 2623 2951 3279 3607 3935 4263 4591 4919 5247 5575 5903 6231 6559 6887 7215 7543 7871 8199

92
vfc_tests_config.json Normal file
View File

@ -0,0 +1,92 @@
{
"make_command": "make -f Makefile.vfc_ci",
"executables": [
{
"executable": "bin/vfc_test_h5",
"parameters" : "maponia3 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm1 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm2 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm3 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "maponia3 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so --mode=rr",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm1 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so --mode=rr",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm2 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so --mode=rr",
"repetitions": 50
}
]
},
{
"executable": "bin/vfc_test_h5",
"parameters" : "sm3 vfc_ci_cycles.txt",
"vfc_backends": [
{
"name": "libinterflop_mca.so --mode=rr",
"repetitions": 50
}
]
}
]
}
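To illustrate how this configuration is consumed, here is a rough Python sketch of what the `vfc_ci test` subcommand does with it. The actual logic lives in ci/test.py (not shown in this excerpt) and additionally aggregates the probe outputs into run files; VFC_BACKENDS is Verificarlo's standard environment variable for selecting a backend:

# Rough sketch only: build the tests, then run every executable under each
# configured backend for the requested number of repetitions.
import json
import os
import subprocess

with open("vfc_tests_config.json") as f:
    config = json.load(f)

subprocess.run(config["make_command"], shell=True, check=True)

for executable in config["executables"]:
    cmd = "%s %s" % (executable["executable"], executable.get("parameters", ""))
    for backend in executable["vfc_backends"]:
        env = dict(os.environ, VFC_BACKENDS=backend["name"])
        for _ in range(backend["repetitions"]):
            # Each repetition produces one probes file via VFC_PROBES_OUTPUT
            subprocess.run(cmd, shell=True, env=env, check=True)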