Mirror of https://github.com/TREX-CoE/Sherman-Morrison.git (synced 2024-12-25 13:53:56 +01:00)
Update vfc_ci code
This commit is contained in:
parent
d81777e347
commit
44f0fc1f51
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -3,24 +3,24 @@
 import os


 def serve(show, git_directory, git_url, port, allow_origin, logo_url):

     # Prepare arguments
     show = "--show" if show else ""

     git = ""
-    if git_directory != None:
+    if git_directory is not None:
         git = "git directory %s" % git_directory
-    if git_url != None:
+    if git_url is not None:
         git = "git url %s" % git_url

     logo = ""
-    if logo_url != None:
+    if logo_url is not None:
         logo = "logo %s" % logo_url

     dirname = os.path.dirname(__file__)

     # Call the "bokeh serve" command on the system
     command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
         % (dirname, show, allow_origin, port, port, git, logo)
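For illustration only (not part of this commit), a minimal usage sketch of the serve() wrapper shown above, with hypothetical argument values:

    # Hypothetical call of the serve() wrapper defined above
    serve(show=True, git_directory=None,
          git_url="https://github.com/TREX-CoE/Sherman-Morrison.git",
          port=8080, allow_origin="localhost", logo_url=None)
    # The composed command would look roughly like:
    # bokeh serve <dirname>/vfc_ci_report --show
    #   --allow-websocket-origin=localhost:8080 --port 8080
    #   --args git url https://github.com/TREX-CoE/Sherman-Morrison.git

The URL and port here are placeholders; an empty logo argument simply leaves the last %s blank.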
ci/setup.py (51 lines changed)
@ -6,10 +6,10 @@ import sys
|
||||
import os
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
|
||||
# Helper functions
|
||||
|
||||
# Helper functions
|
||||
|
||||
def gen_readme(dev_branch, ci_branch):
|
||||
|
||||
@ -26,17 +26,16 @@ def gen_readme(dev_branch, ci_branch):
|
||||
fh.write(render)
|
||||
|
||||
|
||||
|
||||
def gen_workflow(git_host, dev_branch, ci_branch, repo):
|
||||
|
||||
# Init template loader
|
||||
path = os.path.dirname(os.path.abspath(__file__))
|
||||
env = Environment(loader=FileSystemLoader(path))
|
||||
|
||||
|
||||
if git_host == "github":
|
||||
# Load template
|
||||
template = env.get_template("workflow_templates/vfc_test_workflow.j2.yml")
|
||||
template = env.get_template(
|
||||
"workflow_templates/vfc_test_workflow.j2.yml")
|
||||
|
||||
# Render it
|
||||
render = template.render(dev_branch=dev_branch, ci_branch=ci_branch)
|
||||
@ -47,13 +46,14 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
|
||||
with open(filename, "w") as fh:
|
||||
fh.write(render)
|
||||
|
||||
|
||||
if git_host == "gitlab":
|
||||
template = env.get_template("workflow_templates/gitlab-ci.j2.yml")
|
||||
|
||||
# Ask for the user who will run the jobs (Gitlab specific)
|
||||
username = input("[vfc_ci] Enter the name of the user who will run the CI jobs:")
|
||||
email = input("[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
|
||||
username = input(
|
||||
"[vfc_ci] Enter the name of the user who will run the CI jobs:")
|
||||
email = input(
|
||||
"[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
|
||||
|
||||
remote_url = repo.remotes[0].config_reader.get("url")
|
||||
remote_url = remote_url.replace("http://", "")
|
||||
@ -64,7 +64,7 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
|
||||
ci_branch=ci_branch,
|
||||
username=username,
|
||||
email=email,
|
||||
remote_url = remote_url
|
||||
remote_url=remote_url
|
||||
)
|
||||
|
||||
filename = ".gitlab-ci.yml"
|
||||
@ -72,12 +72,10 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
|
||||
fh.write(render)
|
||||
|
||||
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
|
||||
def setup(git_host):
|
||||
|
||||
|
||||
# Init repo and make sure that the workflow setup is possible
|
||||
|
||||
repo = git.Repo(".")
|
||||
@ -91,8 +89,9 @@ def setup(git_host):
|
||||
dev_branch_name = str(dev_branch)
|
||||
dev_remote = dev_branch.tracking_branch()
|
||||
|
||||
# Make sure that the active branch (on which to setup the workflow) has a remote
|
||||
assert(dev_remote != None), "Error [vfc_ci]: The current branch doesn't " \
|
||||
# Make sure that the active branch (on which to setup the workflow) has a
|
||||
# remote
|
||||
assert(dev_remote is not None), "Error [vfc_ci]: The current branch doesn't " \
|
||||
"have a remote."
|
||||
|
||||
# Make sure that we are not behind the remote (so we can push safely later)
|
||||
@ -101,8 +100,6 @@ def setup(git_host):
|
||||
assert(commits_behind == []), "Error [vfc_ci]: The local branch seems " \
|
||||
"to be at least one commit behind remote."
|
||||
|
||||
|
||||
|
||||
# Commit the workflow on the current (dev) branch
|
||||
|
||||
ci_branch_name = "vfc_ci_%s" % dev_branch_name
|
||||
@ -111,12 +108,10 @@ def setup(git_host):
|
||||
repo.index.commit("[auto] Set up Verificarlo CI on this branch")
|
||||
repo.remote(name="origin").push()
|
||||
|
||||
|
||||
|
||||
# Create the CI branch (orphan branch with a readme on it)
|
||||
# (see : https://github.com/gitpython-developers/GitPython/issues/615)
|
||||
|
||||
repo.head.reference = git.Head(repo, "refs/heads/"+ ci_branch_name)
|
||||
repo.head.reference = git.Head(repo, "refs/heads/" + ci_branch_name)
|
||||
|
||||
repo.index.remove(["*"])
|
||||
gen_readme(dev_branch_name, ci_branch_name)
|
||||
@ -133,23 +128,19 @@ def setup(git_host):
|
||||
# Force checkout back to the original (dev) branch
|
||||
repo.git.checkout(dev_branch_name, force=True)
|
||||
|
||||
|
||||
|
||||
# Print termination messages
|
||||
|
||||
print(
|
||||
"Info [vfc_ci]: A Verificarlo CI workflow has been setup on " \
|
||||
"Info [vfc_ci]: A Verificarlo CI workflow has been setup on "
|
||||
"%s." % dev_branch_name
|
||||
)
|
||||
print(
|
||||
"Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on " \
|
||||
"this branch. You can also perform a \"vfc_ci test\" dry run before "\
|
||||
"pushing other commits."
|
||||
)
|
||||
"Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on "
|
||||
"this branch. You can also perform a \"vfc_ci test\" dry run before "
|
||||
"pushing other commits.")
|
||||
|
||||
if git_host == "gitlab":
|
||||
print(
|
||||
"Info [vfc_ci]: Since you are using GitLab, make sure that you " \
|
||||
"have created an access token for the user you specified (registered "\
|
||||
"as a variable called \"CI_PUSH_TOKEN\" in your repository)."
|
||||
)
|
||||
"Info [vfc_ci]: Since you are using GitLab, make sure that you "
|
||||
"have created an access token for the user you specified (registered "
|
||||
"as a variable called \"CI_PUSH_TOKEN\" in your repository).")
|
||||
|
ci/test.py (136 lines changed)
@ -1,6 +1,10 @@
|
||||
# This script reads the vfc_tests_config.json file and executes tests accordingly
|
||||
# It will also generate a ... .vfcrun.hd5 file with the results of the run
|
||||
# It will also generate a ... .vfcrunh5 file with the results of the run
|
||||
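The exact schema of vfc_tests_config.json is not shown in this diff; judging only from the fields referenced further down in this file (config["executables"], each entry's "executable" and optional "parameters"), a hypothetical minimal config could look like the following sketch, where every value is a placeholder:

    # Hypothetical minimal vfc_tests_config.json content (placeholders only),
    # parsed the same way read_config() does further down (json.loads)
    import json

    config = json.loads("""
    {
        "executables": [
            {"executable": "./tests/my_test", "parameters": "--size 100"}
        ]
    }
    """)
    print(config["executables"][0]["executable"])

Any other keys the script expects are not visible in this hunk and are deliberately omitted.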
|
||||
import sigdigits as sd
|
||||
import scipy.stats
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import os
|
||||
import json
|
||||
|
||||
@ -12,21 +16,15 @@ import time
|
||||
import pickle
|
||||
pickle.HIGHEST_PROTOCOL = 4
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import scipy.stats
|
||||
|
||||
import sigdigits as sd
|
||||
|
||||
# Magic numbers
|
||||
min_pvalue = 0.05
|
||||
max_zscore = 3
|
||||
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
|
||||
|
||||
# Helper functions
|
||||
# Helper functions
|
||||
|
||||
# Read a CSV file outputted by vfc_probe as a Pandas dataframe
|
||||
def read_probes_csv(filepath, backend, warnings, execution_data):
|
||||
@ -36,35 +34,34 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
|
||||
|
||||
except FileNotFoundError:
|
||||
print(
|
||||
"Warning [vfc_ci]: Probes not found, your code might have crashed " \
|
||||
"Warning [vfc_ci]: Probes not found, your code might have crashed "
|
||||
"or you might have forgotten to call vfc_dump_probes"
|
||||
)
|
||||
warnings.append(execution_data)
|
||||
return pd.DataFrame(
|
||||
columns = ["test", "variable", "values", "vfc_backend"]
|
||||
columns=["test", "variable", "values", "vfc_backend"]
|
||||
)
|
||||
|
||||
except Exception:
|
||||
print(
|
||||
"Warning [vfc_ci]: Your probes could not be read for some unknown " \
|
||||
"Warning [vfc_ci]: Your probes could not be read for some unknown "
|
||||
"reason"
|
||||
)
|
||||
warnings.append(execution_data)
|
||||
return pd.DataFrame(
|
||||
columns = ["test", "variable", "values", "vfc_backend"]
|
||||
columns=["test", "variable", "values", "vfc_backend"]
|
||||
)
|
||||
|
||||
if len(results) == 0:
|
||||
print(
|
||||
"Warning [vfc_ci]: Probes empty, it looks like you have dumped " \
|
||||
"Warning [vfc_ci]: Probes empty, it looks like you have dumped "
|
||||
"them without calling vfc_put_probe"
|
||||
)
|
||||
warnings.append(execution_data)
|
||||
|
||||
|
||||
# Once the CSV has been opened and validated, return its content
|
||||
results["value"] = results["value"].apply(lambda x: float.fromhex(x))
|
||||
results.rename(columns = {"value":"values"}, inplace = True)
|
||||
results.rename(columns={"value": "values"}, inplace=True)
|
||||
|
||||
results["vfc_backend"] = backend
|
||||
|
||||
@ -75,6 +72,9 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
|
||||
|
||||
def significant_digits(x):
|
||||
|
||||
# If the null hypothesis is rejected, call sigdigits with the General
|
||||
# formula:
|
||||
if x.pvalue < min_pvalue:
|
||||
# In a pandas DF, "values" actually refers to the array of columns, and
|
||||
# not the column named "values"
|
||||
distribution = x.values[3]
|
||||
@ -83,41 +83,31 @@ def significant_digits(x):
|
||||
# The distribution's empirical average will be used as the reference
|
||||
mu = np.array([x.mu])
|
||||
|
||||
# If the null hypothesis is rejected, call sigdigits with General mode:
|
||||
if x.pvalue < min_pvalue:
|
||||
method = sd.Method.General
|
||||
s = sd.significant_digits(
|
||||
distribution,
|
||||
mu,
|
||||
precision=sd.Precision.Absolute,
|
||||
method=method
|
||||
precision=sd.Precision.Relative,
|
||||
method=sd.Method.General,
|
||||
|
||||
probability=0.9,
|
||||
confidence=0.95
|
||||
)
|
||||
|
||||
|
||||
# Else, manually compute sMCA which is equivalent to a 66% confidence interval
|
||||
else:
|
||||
method = sd.Method.CNH
|
||||
s = sd.significant_digits(
|
||||
distribution,
|
||||
mu,
|
||||
precision=sd.Precision.Absolute,
|
||||
method=method,
|
||||
|
||||
probability=0.66,
|
||||
confidence=0.66,
|
||||
)
|
||||
|
||||
# s is returned as a size 1 list
|
||||
# s is returned inside a list
|
||||
return s[0]
|
||||
|
||||
# Else, manually compute sMCA (Stott-Parker formula)
|
||||
else:
|
||||
return -np.log2(np.absolute(x.sigma / x.mu))
|
||||
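The else branch above is the Stott-Parker estimate: when the Shapiro-Wilk p-value is at least min_pvalue (normality not rejected), the number of significant base-2 digits is taken directly from the empirical moments. A small worked example with hypothetical values, not part of the diff:

    # Worked example of the Stott-Parker estimate used in the else branch above
    import numpy as np

    mu, sigma = 1.0, 1e-6                      # hypothetical empirical mean and std
    s2 = -np.log2(np.absolute(sigma / mu))     # ~19.9 significant bits
    s10 = s2 / np.log2(10)                     # ~6.0 decimal digits (the script uses sd.change_base for this)
    print(s2, s10)

When the p-value is below min_pvalue, the General formula of the sigdigits module is called instead, as shown earlier in this function.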
|
||||
|
||||
def significant_digits_lower_bound(x):
|
||||
# If the null hypothesis is rejected, no lower bound
|
||||
if x.pvalue < min_pvalue:
|
||||
return x.s2
|
||||
|
||||
# Else, the lower bound will be a 95% confidence interval
|
||||
|
||||
# Else, the lower bound will be computed with p= .9 alpha-1=.95
|
||||
else:
|
||||
distribution = x.values[3]
|
||||
distribution = distribution.reshape(len(distribution), 1)
|
||||
|
||||
@ -126,16 +116,17 @@ def significant_digits_lower_bound(x):
|
||||
s = sd.significant_digits(
|
||||
distribution,
|
||||
mu,
|
||||
precision=sd.Precision.Absolute,
|
||||
precision=sd.Precision.Relative,
|
||||
method=sd.Method.CNH,
|
||||
|
||||
probability=0.9,
|
||||
confidence=0.95
|
||||
)
|
||||
|
||||
return s[0]
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
|
||||
##########################################################################
|
||||
|
||||
# Main functions
|
||||
|
||||
@ -154,7 +145,6 @@ def read_config():
|
||||
return json.loads(data)
|
||||
|
||||
|
||||
|
||||
# Set up metadata
|
||||
def generate_metadata(is_git_commit):
|
||||
|
||||
@ -167,7 +157,6 @@ def generate_metadata(is_git_commit):
|
||||
"message": ""
|
||||
}
|
||||
|
||||
|
||||
if is_git_commit:
|
||||
print("Fetching metadata from last commit...")
|
||||
from git import Repo
|
||||
@ -185,7 +174,6 @@ def generate_metadata(is_git_commit):
|
||||
return metadata
|
||||
|
||||
|
||||
|
||||
# Execute tests and collect results in a Pandas dataframe (+ dataprocessing)
|
||||
def run_tests(config):
|
||||
|
||||
@ -204,10 +192,10 @@ def run_tests(config):
|
||||
# not get any data
|
||||
warnings = []
|
||||
|
||||
|
||||
# Tests execution loop
|
||||
for executable in config["executables"]:
|
||||
print("Info [vfc_ci]: Running executable :", executable["executable"], "...")
|
||||
print("Info [vfc_ci]: Running executable :",
|
||||
executable["executable"], "...")
|
||||
|
||||
parameters = ""
|
||||
if "parameters" in executable:
|
||||
@ -245,26 +233,23 @@ def run_tests(config):
|
||||
|
||||
n_files = n_files + 1
|
||||
|
||||
|
||||
# Clean CSV output files (by deleting the tmp folder)
|
||||
os.system("rm -rf .vfcruns.tmp")
|
||||
|
||||
|
||||
# Combine all separate executions in one dataframe
|
||||
data = pd.concat(data, sort=False, ignore_index=True)
|
||||
data = data.groupby(["test", "vfc_backend", "variable"])\
|
||||
.values.apply(list).reset_index()
|
||||
|
||||
|
||||
# Make sure we have some data to work on
|
||||
assert(len(data) != 0), "Error [vfc_ci]: No data have been generated " \
|
||||
"by your tests executions, aborting run without writing results file"
|
||||
|
||||
return data, warnings
|
||||
|
||||
|
||||
|
||||
# Data processing
|
||||
|
||||
|
||||
def data_processing(data):
|
||||
|
||||
data["values"] = data["values"].apply(lambda x: np.array(x).astype(float))
|
||||
@ -272,8 +257,8 @@ def data_processing(data):
|
||||
# Get empirical average, standard deviation and p-value
|
||||
data["mu"] = data["values"].apply(np.average)
|
||||
data["sigma"] = data["values"].apply(np.std)
|
||||
data["pvalue"] = data["values"].apply(lambda x: scipy.stats.shapiro(x).pvalue)
|
||||
|
||||
data["pvalue"] = data["values"].apply(
|
||||
lambda x: scipy.stats.shapiro(x).pvalue)
|
||||
|
||||
# Significant digits
|
||||
data["s2"] = data.apply(significant_digits, axis=1)
|
||||
@ -281,8 +266,8 @@ def data_processing(data):
|
||||
|
||||
# Lower bound of the confidence interval using the sigdigits module
|
||||
data["s2_lower_bound"] = data.apply(significant_digits_lower_bound, axis=1)
|
||||
data["s10_lower_bound"] = data["s2_lower_bound"].apply(lambda x: sd.change_base(x, 10))
|
||||
|
||||
data["s10_lower_bound"] = data["s2_lower_bound"].apply(
|
||||
lambda x: sd.change_base(x, 10))
|
||||
|
||||
# Compute moments of the distribution
|
||||
# (including a new distribution obtained by filtering outliers)
|
||||
@ -297,13 +282,13 @@ def data_processing(data):
|
||||
|
||||
data["nsamples"] = data["values"].apply(len)
|
||||
|
||||
|
||||
|
||||
# Display all executions that resulted in a warning
|
||||
|
||||
|
||||
def show_warnings(warnings):
|
||||
if len(warnings) > 0:
|
||||
print(
|
||||
"Warning [vfc_ci]: Some of your runs could not generate any data " \
|
||||
"Warning [vfc_ci]: Some of your runs could not generate any data "
|
||||
"(for instance because your code crashed) and resulted in "
|
||||
"warnings. Here is the complete list :"
|
||||
)
|
||||
@ -316,9 +301,7 @@ def show_warnings(warnings):
|
||||
print(" Repetition: %s" % warnings[i]["repetition"])
|
||||
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
##########################################################################
|
||||
|
||||
# Entry point
|
||||
|
||||
@ -334,54 +317,51 @@ def run(is_git_commit, export_raw_values, dry_run):
|
||||
data, warnings = run_tests(config)
|
||||
show_warnings(warnings)
|
||||
|
||||
|
||||
# Data processing
|
||||
print("Info [vfc_ci]: Processing data...")
|
||||
data_processing(data)
|
||||
|
||||
|
||||
# Prepare data for export (by creating a proper index and linking run timestamp)
|
||||
# Prepare data for export (by creating a proper index and linking run
|
||||
# timestamp)
|
||||
data = data.set_index(["test", "variable", "vfc_backend"]).sort_index()
|
||||
data["timestamp"] = metadata["timestamp"]
|
||||
|
||||
filename = metadata["hash"] if is_git_commit else str(metadata["timestamp"])
|
||||
|
||||
filename = metadata["hash"] if is_git_commit else str(
|
||||
metadata["timestamp"])
|
||||
|
||||
# Prepare metadata for export
|
||||
metadata = pd.DataFrame.from_dict([metadata])
|
||||
metadata = metadata.set_index("timestamp")
|
||||
|
||||
|
||||
# NOTE : Exporting to HDF5 requires to install "tables" on the system
|
||||
|
||||
# Export raw data if needed
|
||||
if export_raw_values and not dry_run:
|
||||
data.to_hdf(filename + ".vfcraw.hd5", key="data")
|
||||
metadata.to_hdf(filename + ".vfcraw.hd5", key="metadata")
|
||||
data.to_hdf(filename + ".vfcraw.h5", key="data")
|
||||
metadata.to_hdf(filename + ".vfcraw.h5", key="metadata")
|
||||
|
||||
# Export data
|
||||
del data["values"]
|
||||
if not dry_run:
|
||||
data.to_hdf(filename + ".vfcrun.hd5", key="data")
|
||||
metadata.to_hdf(filename + ".vfcrun.hd5", key="metadata")
|
||||
|
||||
data.to_hdf(filename + ".vfcrun.h5", key="data")
|
||||
metadata.to_hdf(filename + ".vfcrun.h5", key="metadata")
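The NOTE above mentions that HDF5 export requires the optional "tables" package. As a hypothetical read-back sketch (not part of the diff, the filename is illustrative), the exported run file can be loaded again with pandas:

    # Hypothetical read-back of an exported run file
    import pandas as pd

    data = pd.read_hdf("1625000000.vfcrun.h5", key="data")
    metadata = pd.read_hdf("1625000000.vfcrun.h5", key="metadata")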
|
||||
|
||||
# Print termination messages
|
||||
print(
|
||||
"Info [vfc_ci]: The results have been successfully written to " \
|
||||
"%s.vfcrun.hd5." \
|
||||
"Info [vfc_ci]: The results have been successfully written to "
|
||||
"%s.vfcrun.h5."
|
||||
% filename
|
||||
)
|
||||
|
||||
if export_raw_values:
|
||||
print(
|
||||
"Info [vfc_ci]: A file containing the raw values has also been " \
|
||||
"created : %s.vfcraw.hd5."
|
||||
"Info [vfc_ci]: A file containing the raw values has also been "
|
||||
"created : %s.vfcraw.h5."
|
||||
% filename
|
||||
)
|
||||
|
||||
if dry_run:
|
||||
print(
|
||||
"Info [vfc_ci]: The dry run flag was enabled, so no files were " \
|
||||
"Info [vfc_ci]: The dry run flag was enabled, so no files were "
|
||||
"actually created."
|
||||
)
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -9,14 +9,13 @@ from math import pi
|
||||
from bokeh.plotting import figure, curdoc
|
||||
from bokeh.embed import components
|
||||
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool, \
|
||||
TextInput, CheckboxGroup, TapTool, CustomJS
|
||||
TextInput, CheckboxGroup, TapTool, CustomJS
|
||||
|
||||
import helper
|
||||
import plot
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
##########################################################################
|
||||
|
||||
|
||||
class CompareRuns:
|
||||
@ -29,13 +28,13 @@ class CompareRuns:
|
||||
def gen_x_series(self, timestamps):
|
||||
|
||||
# Initialize the objects to return
|
||||
x_series= []
|
||||
x_series = []
|
||||
x_metadata = dict(
|
||||
date = [],
|
||||
is_git_commit = [],
|
||||
hash = [],
|
||||
author = [],
|
||||
message = []
|
||||
date=[],
|
||||
is_git_commit=[],
|
||||
hash=[],
|
||||
author=[],
|
||||
message=[]
|
||||
)
|
||||
|
||||
# n == 0 means we want all runs, we also make sure not to go out of
|
||||
@ -44,43 +43,40 @@ class CompareRuns:
|
||||
if n == 0 or n > len(timestamps):
|
||||
n = len(timestamps)
|
||||
|
||||
|
||||
for i in range(0, n):
|
||||
# Get metadata associated to this run
|
||||
row_metadata = helper.get_metadata(self.metadata, timestamps[-i-1])
|
||||
date = time.ctime(timestamps[-i-1])
|
||||
row_metadata = helper.get_metadata(
|
||||
self.metadata, timestamps[-i - 1])
|
||||
date = time.ctime(timestamps[-i - 1])
|
||||
|
||||
# Fill the x series
|
||||
str = row_metadata["name"]
|
||||
x_series.insert(0, helper.get_metadata(self.metadata, timestamps[-i-1])["name"])
|
||||
x_series.insert(0, helper.get_metadata(
|
||||
self.metadata, timestamps[-i - 1])["name"])
|
||||
|
||||
# Fill the metadata lists
|
||||
x_metadata["date"].insert(0, date)
|
||||
x_metadata["is_git_commit"].insert(0, row_metadata["is_git_commit"])
|
||||
x_metadata["is_git_commit"].insert(
|
||||
0, row_metadata["is_git_commit"])
|
||||
x_metadata["hash"].insert(0, row_metadata["hash"])
|
||||
x_metadata["author"].insert(0, row_metadata["author"])
|
||||
x_metadata["message"].insert(0, row_metadata["message"])
|
||||
|
||||
|
||||
return x_series, x_metadata
|
||||
|
||||
|
||||
|
||||
# Plots update function
|
||||
|
||||
def update_plots(self):
|
||||
|
||||
# Select all data matching current test/var/backend
|
||||
|
||||
runs = self.data.loc[
|
||||
[self.widgets["select_test"].value],
|
||||
self.widgets["select_var"].value, self.widgets["select_backend"].value
|
||||
]
|
||||
runs = self.data.loc[[self.widgets["select_test"].value],
|
||||
self.widgets["select_var"].value,
|
||||
self.widgets["select_backend"].value]
|
||||
|
||||
timestamps = runs["timestamp"]
|
||||
x_series, x_metadata = self.gen_x_series(timestamps.sort_values())
|
||||
|
||||
|
||||
# Update source
|
||||
|
||||
main_dict = runs.to_dict("series")
|
||||
@ -91,8 +87,7 @@ class CompareRuns:
|
||||
|
||||
# Select the last n runs only
|
||||
n = self.current_n_runs
|
||||
main_dict = {key:value[-n:] for key, value in main_dict.items()}
|
||||
|
||||
main_dict = {key: value[-n:] for key, value in main_dict.items()}
|
||||
|
||||
# Generate ColumnDataSources for the 3 dotplots
|
||||
for stat in ["sigma", "s10", "s2"]:
|
||||
@ -111,18 +106,20 @@ class CompareRuns:
|
||||
}
|
||||
|
||||
if stat == "s10" or stat == "s2":
|
||||
dict["%s_lower_bound" % stat] = main_dict["%s_lower_bound" % stat]
|
||||
dict["%s_lower_bound" %
|
||||
stat] = main_dict["%s_lower_bound" %
|
||||
stat]
|
||||
|
||||
# Filter outliers if the box is checked
|
||||
if len(self.widgets["outliers_filtering_compare"].active) > 0:
|
||||
outliers = helper.detect_outliers(dict[stat])
|
||||
dict[stat] = helper.remove_outliers(dict[stat], outliers)
|
||||
dict["%s_x" % stat] = helper.remove_outliers(dict["%s_x" % stat], outliers)
|
||||
dict["%s_x" % stat] = helper.remove_outliers(
|
||||
dict["%s_x" % stat], outliers)
|
||||
|
||||
# Assign ColumnDataSource
|
||||
self.sources["%s_source" % stat].data = dict
|
||||
|
||||
|
||||
# Generate ColumnDataSource for the boxplot
|
||||
dict = {
|
||||
"is_git_commit": main_dict["is_git_commit"],
|
||||
@ -132,40 +129,48 @@ class CompareRuns:
|
||||
"message": main_dict["message"],
|
||||
|
||||
"x": main_dict["x"],
|
||||
"min" : main_dict["min"],
|
||||
"quantile25" : main_dict["quantile25"],
|
||||
"quantile50" : main_dict["quantile50"],
|
||||
"quantile75" : main_dict["quantile75"],
|
||||
"max" : main_dict["max"],
|
||||
"mu" : main_dict["mu"],
|
||||
"pvalue" : main_dict["pvalue"],
|
||||
"min": main_dict["min"],
|
||||
"quantile25": main_dict["quantile25"],
|
||||
"quantile50": main_dict["quantile50"],
|
||||
"quantile75": main_dict["quantile75"],
|
||||
"max": main_dict["max"],
|
||||
"mu": main_dict["mu"],
|
||||
"pvalue": main_dict["pvalue"],
|
||||
|
||||
"nsamples": main_dict["nsamples"]
|
||||
}
|
||||
|
||||
|
||||
|
||||
self.sources["boxplot_source"].data = dict
|
||||
|
||||
# Update x axis
|
||||
|
||||
# Update x_ranges
|
||||
helper.reset_x_range(self.plots["boxplot"], self.sources["boxplot_source"].data["x"])
|
||||
helper.reset_x_range(self.plots["sigma_plot"], self.sources["sigma_source"].data["sigma_x"])
|
||||
helper.reset_x_range(self.plots["s10_plot"], self.sources["s10_source"].data["s10_x"])
|
||||
helper.reset_x_range(self.plots["s2_plot"], self.sources["s2_source"].data["s2_x"])
|
||||
|
||||
|
||||
|
||||
helper.reset_x_range(
|
||||
self.plots["boxplot"],
|
||||
self.sources["boxplot_source"].data["x"]
|
||||
)
|
||||
helper.reset_x_range(
|
||||
self.plots["sigma_plot"],
|
||||
self.sources["sigma_source"].data["sigma_x"]
|
||||
)
|
||||
helper.reset_x_range(
|
||||
self.plots["s10_plot"],
|
||||
self.sources["s10_source"].data["s10_x"]
|
||||
)
|
||||
helper.reset_x_range(
|
||||
self.plots["s2_plot"],
|
||||
self.sources["s2_source"].data["s2_x"]
|
||||
)
|
||||
|
||||
# Widgets' callback functions
|
||||
|
||||
def update_test(self, attrname, old, new):
|
||||
|
||||
# If the value is updated by the CustomJS, self.widgets["select_var"].value
|
||||
# won't be updated, so we have to look for that case and assign it manually
|
||||
# won't be updated, so we have to look for that case and assign it
|
||||
# manually
|
||||
|
||||
# "new" should be a list when updated by CustomJS
|
||||
if type(new) == list:
|
||||
if isinstance(new, list):
|
||||
# If filtering removed all options, we might have an empty list
|
||||
# (in this case, we just skip the callback and do nothing)
|
||||
if len(new) > 0:
|
||||
@ -183,7 +188,6 @@ class CompareRuns:
|
||||
.index.get_level_values("variable").drop_duplicates().tolist()
|
||||
self.widgets["select_var"].options = self.vars
|
||||
|
||||
|
||||
# Reset var selection if old one is not available in new vars
|
||||
if self.widgets["select_var"].value not in self.vars:
|
||||
self.widgets["select_var"].value = self.vars[0]
|
||||
@ -194,14 +198,14 @@ class CompareRuns:
|
||||
# anyway)
|
||||
self.update_var("", "", self.widgets["select_var"].value)
|
||||
|
||||
|
||||
def update_var(self, attrname, old, new):
|
||||
|
||||
# If the value is updated by the CustomJS, self.widgets["select_var"].value
|
||||
# won't be updated, so we have to look for that case and assign it manually
|
||||
# won't be updated, so we have to look for that case and assign it
|
||||
# manually
|
||||
|
||||
# new should be a list when updated by CustomJS
|
||||
if type(new) == list:
|
||||
if isinstance(new, list):
|
||||
new = new[0]
|
||||
|
||||
if new != self.widgets["select_var"].value:
|
||||
@ -209,7 +213,6 @@ class CompareRuns:
|
||||
self.widgets["select_var"].value = new
|
||||
return
|
||||
|
||||
|
||||
# New list of available backends
|
||||
self.backends = self.data.loc[self.widgets["select_test"].value, self.widgets["select_var"].value]\
|
||||
.index.get_level_values("vfc_backend").drop_duplicates().tolist()
|
||||
@ -225,13 +228,11 @@ class CompareRuns:
|
||||
# anyway)
|
||||
self.update_backend("", "", self.widgets["select_backend"].value)
|
||||
|
||||
|
||||
def update_backend(self, attrname, old, new):
|
||||
|
||||
# Simply update plots, since no other data is affected
|
||||
self.update_plots()
|
||||
|
||||
|
||||
def update_n_runs(self, attrname, old, new):
|
||||
# Simply update runs selection (value and string display)
|
||||
self.select_n_runs.value = new
|
||||
@ -239,12 +240,9 @@ class CompareRuns:
|
||||
|
||||
self.update_plots()
|
||||
|
||||
|
||||
def update_outliers_filtering(self, attrname, old, new):
|
||||
self.update_plots()
|
||||
|
||||
|
||||
|
||||
# Bokeh setup functions
|
||||
|
||||
def setup_plots(self):
|
||||
@ -256,7 +254,6 @@ class CompareRuns:
|
||||
# (defined inside template to avoid bloating server w/ too much JS code)
|
||||
js_tap_callback = "goToInspectRuns();"
|
||||
|
||||
|
||||
# Box plot
|
||||
self.plots["boxplot"] = figure(
|
||||
name="boxplot", title="Variable distribution over runs",
|
||||
@ -280,24 +277,23 @@ class CompareRuns:
|
||||
("Number of samples", "@nsamples")
|
||||
]
|
||||
box_tooltips_formatters = {
|
||||
"@min" : "printf",
|
||||
"@max" : "printf",
|
||||
"@quantile25" : "printf",
|
||||
"@quantile50" : "printf",
|
||||
"@quantile75" : "printf",
|
||||
"@mu" : "printf"
|
||||
"@min": "printf",
|
||||
"@max": "printf",
|
||||
"@quantile25": "printf",
|
||||
"@quantile50": "printf",
|
||||
"@quantile75": "printf",
|
||||
"@mu": "printf"
|
||||
}
|
||||
|
||||
plot.fill_boxplot(
|
||||
self.plots["boxplot"], self.sources["boxplot_source"],
|
||||
tooltips = box_tooltips,
|
||||
tooltips_formatters = box_tooltips_formatters,
|
||||
js_tap_callback = js_tap_callback,
|
||||
server_tap_callback = self.inspect_run_callback_boxplot,
|
||||
tooltips=box_tooltips,
|
||||
tooltips_formatters=box_tooltips_formatters,
|
||||
js_tap_callback=js_tap_callback,
|
||||
server_tap_callback=self.inspect_run_callback_boxplot,
|
||||
)
|
||||
self.doc.add_root(self.plots["boxplot"])
|
||||
|
||||
|
||||
# Sigma plot (bar plot)
|
||||
self.plots["sigma_plot"] = figure(
|
||||
name="sigma_plot", title="Standard deviation σ over runs",
|
||||
@ -317,14 +313,13 @@ class CompareRuns:
|
||||
|
||||
plot.fill_dotplot(
|
||||
self.plots["sigma_plot"], self.sources["sigma_source"], "sigma",
|
||||
tooltips = sigma_tooltips,
|
||||
js_tap_callback = js_tap_callback,
|
||||
server_tap_callback = self.inspect_run_callback_sigma,
|
||||
lines = True
|
||||
tooltips=sigma_tooltips,
|
||||
js_tap_callback=js_tap_callback,
|
||||
server_tap_callback=self.inspect_run_callback_sigma,
|
||||
lines=True
|
||||
)
|
||||
self.doc.add_root(self.plots["sigma_plot"])
|
||||
|
||||
|
||||
# s plot (bar plot with 2 tabs)
|
||||
self.plots["s10_plot"] = figure(
|
||||
name="s10_plot", title="Significant digits s over runs",
|
||||
@ -345,15 +340,14 @@ class CompareRuns:
|
||||
|
||||
plot.fill_dotplot(
|
||||
self.plots["s10_plot"], self.sources["s10_source"], "s10",
|
||||
tooltips = s10_tooltips,
|
||||
js_tap_callback = js_tap_callback,
|
||||
server_tap_callback = self.inspect_run_callback_s10,
|
||||
lines = True,
|
||||
tooltips=s10_tooltips,
|
||||
js_tap_callback=js_tap_callback,
|
||||
server_tap_callback=self.inspect_run_callback_s10,
|
||||
lines=True,
|
||||
lower_bound=True
|
||||
)
|
||||
s10_tab = Panel(child=self.plots["s10_plot"], title="Base 10")
|
||||
|
||||
|
||||
self.plots["s2_plot"] = figure(
|
||||
name="s2_plot", title="Significant digits s over runs",
|
||||
plot_width=900, plot_height=400, x_range=[""],
|
||||
@ -373,23 +367,22 @@ class CompareRuns:
|
||||
|
||||
plot.fill_dotplot(
|
||||
self.plots["s2_plot"], self.sources["s2_source"], "s2",
|
||||
tooltips = s2_tooltips,
|
||||
js_tap_callback = js_tap_callback,
|
||||
server_tap_callback = self.inspect_run_callback_s2,
|
||||
lines = True,
|
||||
tooltips=s2_tooltips,
|
||||
js_tap_callback=js_tap_callback,
|
||||
server_tap_callback=self.inspect_run_callback_s2,
|
||||
lines=True,
|
||||
lower_bound=True
|
||||
)
|
||||
s2_tab = Panel(child=self.plots["s2_plot"], title="Base 2")
|
||||
|
||||
s_tabs = Tabs(
|
||||
name = "s_tabs",
|
||||
name="s_tabs",
|
||||
tabs=[s10_tab, s2_tab],
|
||||
tabs_location = "below"
|
||||
tabs_location="below"
|
||||
)
|
||||
|
||||
self.doc.add_root(s_tabs)
|
||||
|
||||
|
||||
def setup_widgets(self):
|
||||
|
||||
# Initial selections
|
||||
@ -404,13 +397,11 @@ class CompareRuns:
|
||||
self.backends = self.data.loc[self.tests[0], self.vars[0]]\
|
||||
.index.get_level_values("vfc_backend").drop_duplicates().tolist()
|
||||
|
||||
|
||||
# Custom JS callback that will be used client side to filter selections
|
||||
filter_callback_js = """
|
||||
selector.options = options.filter(e => e.includes(cb_obj.value));
|
||||
"""
|
||||
|
||||
|
||||
# Test selector widget
|
||||
|
||||
# Number of runs to display
|
||||
@ -442,13 +433,15 @@ class CompareRuns:
|
||||
self.widgets["test_filter"] = TextInput(
|
||||
name="test_filter", title="Tests filter:"
|
||||
)
|
||||
self.widgets["test_filter"].js_on_change("value", CustomJS(
|
||||
args=dict(options=self.tests, selector=self.widgets["select_test"]),
|
||||
code=filter_callback_js
|
||||
))
|
||||
self.widgets["test_filter"].js_on_change(
|
||||
"value",
|
||||
CustomJS(
|
||||
args=dict(
|
||||
options=self.tests,
|
||||
selector=self.widgets["select_test"]),
|
||||
code=filter_callback_js))
|
||||
self.doc.add_root(self.widgets["test_filter"])
|
||||
|
||||
|
||||
# Number of runs to display
|
||||
|
||||
self.widgets["select_n_runs"] = Select(
|
||||
@ -458,7 +451,6 @@ class CompareRuns:
|
||||
self.doc.add_root(self.widgets["select_n_runs"])
|
||||
self.widgets["select_n_runs"].on_change("value", self.update_n_runs)
|
||||
|
||||
|
||||
# Variable selector widget
|
||||
|
||||
self.widgets["select_var"] = Select(
|
||||
@ -469,7 +461,6 @@ class CompareRuns:
|
||||
self.widgets["select_var"].on_change("value", self.update_var)
|
||||
self.widgets["select_var"].on_change("options", self.update_var)
|
||||
|
||||
|
||||
# Backend selector widget
|
||||
|
||||
self.widgets["select_backend"] = Select(
|
||||
@ -479,23 +470,21 @@ class CompareRuns:
|
||||
self.doc.add_root(self.widgets["select_backend"])
|
||||
self.widgets["select_backend"].on_change("value", self.update_backend)
|
||||
|
||||
|
||||
# Outliers filtering checkbox
|
||||
|
||||
self.widgets["outliers_filtering_compare"] = CheckboxGroup(
|
||||
name="outliers_filtering_compare",
|
||||
labels=["Filter outliers"], active =[]
|
||||
labels=["Filter outliers"], active=[]
|
||||
)
|
||||
self.doc.add_root(self.widgets["outliers_filtering_compare"])
|
||||
self.widgets["outliers_filtering_compare"]\
|
||||
.on_change("active", self.update_outliers_filtering)
|
||||
|
||||
|
||||
|
||||
# Communication methods
|
||||
# (to send/receive messages to/from master)
|
||||
|
||||
# Callback to change view of Inspect runs when data is selected
|
||||
|
||||
def inspect_run_callback(self, new, source_name, x_name):
|
||||
|
||||
# In case we just unselected everything, then do nothing
|
||||
@ -507,7 +496,6 @@ class CompareRuns:
|
||||
|
||||
self.master.go_to_inspect(run_name)
|
||||
|
||||
|
||||
# Wrappers for each plot (since new is the index of the clicked element,
|
||||
# it is dependent of the plot because we could have filtered some outliers)
|
||||
# There doesn't seem to be an easy way to add custom parameters to a
|
||||
@ -525,7 +513,6 @@ class CompareRuns:
|
||||
def inspect_run_callback_s10(self, attr, old, new):
|
||||
self.inspect_run_callback(new, "s10_source", "s10_x")
|
||||
|
||||
|
||||
# Constructor
|
||||
|
||||
def __init__(self, master, doc, data, metadata):
|
||||
@ -536,11 +523,10 @@ class CompareRuns:
|
||||
self.data = data
|
||||
self.metadata = metadata
|
||||
|
||||
|
||||
self.sources = {
|
||||
"boxplot_source": ColumnDataSource(data={}),
|
||||
"sigma_source": ColumnDataSource(data={}),
|
||||
"s10_source" :ColumnDataSource(data={}),
|
||||
"s10_source": ColumnDataSource(data={}),
|
||||
"s2_source": ColumnDataSource(data={})
|
||||
}
|
||||
|
||||
|
@ -10,7 +10,7 @@ import numpy as np
|
||||
max_ticks = 15
|
||||
max_zscore = 3
|
||||
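helper.detect_outliers itself is not visible in this diff; the max_zscore = 3 constant above suggests a z-score based filter. The following is only an assumed sketch of such a filter, not the module's actual implementation:

    # Assumed sketch of a z-score outlier filter consistent with max_zscore = 3
    # (detect_outliers is not shown in this diff)
    import numpy as np

    def detect_outliers_sketch(values, max_zscore=3):
        values = np.asarray(values, dtype=float)
        sigma = values.std()
        if sigma == 0:
            return [False] * len(values)
        z = np.abs(values - values.mean()) / sigma
        return (z > max_zscore).tolist()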
|
||||
################################################################################
|
||||
##########################################################################
|
||||
|
||||
|
||||
# From a timestamp, return the associated metadata as a Pandas serie
|
||||
@ -39,7 +39,6 @@ def get_run_name(timestamp, hash):
|
||||
now = calendar.timegm(gmt)
|
||||
diff = now - timestamp
|
||||
|
||||
|
||||
# Special case : < 1 minute (return string directly)
|
||||
if diff < 60:
|
||||
str = "Less than a minute ago"
|
||||
@ -83,12 +82,10 @@ def get_run_name(timestamp, hash):
|
||||
|
||||
str = str % (n, plural)
|
||||
|
||||
|
||||
# We might want to add the git hash
|
||||
if hash != "":
|
||||
str = str + " (%s)" % hash
|
||||
|
||||
|
||||
# Finally, check for duplicate with previously generated string
|
||||
if str == get_run_name.previous:
|
||||
# Increment the duplicate counter and add it to str
|
||||
@ -96,12 +93,14 @@ def get_run_name(timestamp, hash):
|
||||
str = "%s (%s)" % (str, get_run_name.counter)
|
||||
|
||||
else:
|
||||
# No duplicate, reset both previously generated str and duplicate counter
|
||||
# No duplicate, reset both previously generated str and duplicate
|
||||
# counter
|
||||
get_run_name.counter = 0
|
||||
get_run_name.previous = str
|
||||
|
||||
return str
|
||||
|
||||
|
||||
# These external variables will store data about the last generated string to
|
||||
# avoid duplicates (assuming the runs are sorted by time)
|
||||
get_run_name.counter = 0
|
||||
@ -156,11 +155,16 @@ def remove_boxplot_outliers(dict, outliers, prefix):
|
||||
|
||||
dict["%s_x" % prefix] = remove_outliers(dict["%s_x" % prefix], outliers)
|
||||
|
||||
dict["%s_min" % prefix] = remove_outliers(dict["%s_min" % prefix], outliers)
|
||||
dict["%s_quantile25" % prefix] = remove_outliers(dict["%s_quantile25" % prefix], outliers)
|
||||
dict["%s_quantile50" % prefix] = remove_outliers(dict["%s_quantile50" % prefix], outliers)
|
||||
dict["%s_quantile75" % prefix] = remove_outliers(dict["%s_quantile75" % prefix], outliers)
|
||||
dict["%s_max" % prefix] = remove_outliers(dict["%s_max" % prefix], outliers)
|
||||
dict["%s_min" % prefix] = remove_outliers(
|
||||
dict["%s_min" % prefix], outliers)
|
||||
dict["%s_quantile25" % prefix] = remove_outliers(
|
||||
dict["%s_quantile25" % prefix], outliers)
|
||||
dict["%s_quantile50" % prefix] = remove_outliers(
|
||||
dict["%s_quantile50" % prefix], outliers)
|
||||
dict["%s_quantile75" % prefix] = remove_outliers(
|
||||
dict["%s_quantile75" % prefix], outliers)
|
||||
dict["%s_max" % prefix] = remove_outliers(
|
||||
dict["%s_max" % prefix], outliers)
|
||||
dict["%s_mu" % prefix] = remove_outliers(dict["%s_mu" % prefix], outliers)
|
||||
|
||||
dict["nsamples"] = remove_outliers(dict["nsamples"], outliers)
|
||||
|
@ -9,14 +9,13 @@ import numpy as np
|
||||
from bokeh.plotting import figure, curdoc
|
||||
from bokeh.embed import components
|
||||
from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool,\
|
||||
RadioButtonGroup, CheckboxGroup, CustomJS
|
||||
RadioButtonGroup, CheckboxGroup, CustomJS
|
||||
|
||||
import helper
|
||||
import plot
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
##########################################################################
|
||||
|
||||
|
||||
class InspectRuns:
|
||||
@ -40,7 +39,6 @@ class InspectRuns:
|
||||
|
||||
return runs_dict
|
||||
|
||||
|
||||
def gen_boxplot_tooltips(self, prefix):
|
||||
return [
|
||||
("Name", "@%s_x" % prefix),
|
||||
@ -55,34 +53,35 @@ class InspectRuns:
|
||||
|
||||
def gen_boxplot_tooltips_formatters(self, prefix):
|
||||
return {
|
||||
"@%s_min" % prefix : "printf",
|
||||
"@%s_max" % prefix : "printf",
|
||||
"@%s_quantile25" % prefix : "printf",
|
||||
"@%s_quantile50" % prefix : "printf",
|
||||
"@%s_quantile75" % prefix : "printf",
|
||||
"@%s_mu" % prefix : "printf"
|
||||
"@%s_min" % prefix: "printf",
|
||||
"@%s_max" % prefix: "printf",
|
||||
"@%s_quantile25" % prefix: "printf",
|
||||
"@%s_quantile50" % prefix: "printf",
|
||||
"@%s_quantile75" % prefix: "printf",
|
||||
"@%s_mu" % prefix: "printf"
|
||||
}
|
||||
|
||||
|
||||
# Data processing helper
|
||||
# (computes new distributions for sigma, s2, s10)
|
||||
|
||||
def data_processing(self, dataframe):
|
||||
|
||||
# Compute aggragated mu
|
||||
dataframe["mu"] = np.vectorize(np.average)(dataframe["mu"], weights=dataframe["nsamples"])
|
||||
dataframe["mu"] = np.vectorize(
|
||||
np.average)(
|
||||
dataframe["mu"],
|
||||
weights=dataframe["nsamples"])
|
||||
|
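For illustration (hypothetical numbers, not part of the diff), the statement above combines each group's per-run averages into one aggregated mu, weighted by the corresponding sample counts:

    # Hypothetical illustration of the weighted average used above
    import numpy as np

    np.average([1.0, 2.0], weights=[10, 30])   # (1*10 + 2*30) / 40 = 1.75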
||||
# nsamples is the number of aggregated elements (as well as the number
|
||||
# of samples for our new sigma and s distributions)
|
||||
dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
|
||||
|
||||
|
||||
dataframe["mu_x"] = dataframe.index
|
||||
# Make sure that strings don't excede a certain length
|
||||
dataframe["mu_x"] = dataframe["mu_x"].apply(
|
||||
lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
|
||||
)
|
||||
|
||||
|
||||
# Get quantiles and mu for sigma, s10, s2
|
||||
for prefix in ["sigma", "s10", "s2"]:
|
||||
|
||||
@ -91,18 +90,18 @@ class InspectRuns:
|
||||
dataframe[prefix] = dataframe[prefix].apply(np.sort)
|
||||
|
||||
dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
|
||||
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.25,))
|
||||
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.50,))
|
||||
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.75,))
|
||||
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(
|
||||
np.quantile, args=(0.25,))
|
||||
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(
|
||||
np.quantile, args=(0.50,))
|
||||
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(
|
||||
np.quantile, args=(0.75,))
|
||||
dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
|
||||
dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
|
||||
del dataframe[prefix]
|
||||
|
||||
|
||||
return dataframe
|
||||
|
||||
|
||||
|
||||
# Plots update function
|
||||
|
||||
def update_plots(self):
|
||||
@ -117,7 +116,6 @@ class InspectRuns:
|
||||
]
|
||||
filterby = self.factors_dict[filterby_display]
|
||||
|
||||
|
||||
# Groupby and aggregate lines belonging to the same group in lists
|
||||
|
||||
groups = self.run_data[
|
||||
@ -131,17 +129,16 @@ class InspectRuns:
|
||||
"sigma": lambda x: x.tolist(),
|
||||
"s10": lambda x: x.tolist(),
|
||||
"s2": lambda x: x.tolist(),
|
||||
|
||||
"mu": lambda x: x.tolist(),
|
||||
|
||||
# Used for mu weighted average first, then will be replaced
|
||||
"nsamples": lambda x: x.tolist()
|
||||
})
|
||||
|
||||
|
||||
# Compute the new distributions, ...
|
||||
groups = self.data_processing(groups).to_dict("list")
|
||||
|
||||
|
||||
# Update source
|
||||
|
||||
# Assign each ColumnDataSource, starting with the boxplots
|
||||
@ -166,7 +163,8 @@ class InspectRuns:
|
||||
top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
|
||||
helper.remove_boxplot_outliers(dict, top_outliers, prefix)
|
||||
|
||||
bottom_outliers = helper.detect_outliers(dict["%s_min" % prefix])
|
||||
bottom_outliers = helper.detect_outliers(
|
||||
dict["%s_min" % prefix])
|
||||
helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
|
||||
|
||||
self.sources["%s_source" % prefix].data = dict
|
||||
@ -185,8 +183,8 @@ class InspectRuns:
|
||||
if len(self.widgets["outliers_filtering_inspect"].active) > 0:
|
||||
mu_outliers = helper.detect_outliers(groups["mu"])
|
||||
groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
|
||||
groups["mu_x"] = helper.remove_outliers(groups["mu_x"], mu_outliers)
|
||||
|
||||
groups["mu_x"] = helper.remove_outliers(
|
||||
groups["mu_x"], mu_outliers)
|
||||
|
||||
# Update plots axis/titles
|
||||
|
||||
@ -194,42 +192,38 @@ class InspectRuns:
|
||||
factors_dict = self.factors_dict.copy()
|
||||
del factors_dict[groupby_display]
|
||||
del factors_dict[filterby_display]
|
||||
over_all = list(factors_dict.keys())[0]
|
||||
for_all = list(factors_dict.keys())[0]
|
||||
|
||||
# Update all display strings for plot title (remove caps, plural)
|
||||
groupby_display = groupby_display.lower()
|
||||
filterby_display = filterby_display.lower()[:-1]
|
||||
over_all = over_all.lower()
|
||||
for_all = for_all.lower()
|
||||
|
||||
self.plots["mu_inspect"].title.text = \
|
||||
"Empirical average μ of %s (groupped by %s, for all %s)" \
|
||||
% (filterby_display, groupby_display, over_all)
|
||||
% (filterby_display, groupby_display, for_all)
|
||||
|
||||
self.plots["sigma_inspect"].title.text = \
|
||||
"Standard deviation σ of %s (groupped by %s, for all %s)" \
|
||||
% (filterby_display, groupby_display, over_all)
|
||||
% (filterby_display, groupby_display, for_all)
|
||||
|
||||
self.plots["s10_inspect"].title.text = \
|
||||
"Significant digits s of %s (groupped by %s, for all %s)" \
|
||||
% (filterby_display, groupby_display, over_all)
|
||||
% (filterby_display, groupby_display, for_all)
|
||||
|
||||
self.plots["s2_inspect"].title.text = \
|
||||
"Significant digits s of %s (groupped by %s, for all %s)" \
|
||||
% (filterby_display, groupby_display, over_all)
|
||||
|
||||
|
||||
# Update x_ranges
|
||||
% (filterby_display, groupby_display, for_all)
|
||||
|
||||
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
|
||||
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
|
||||
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
|
||||
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
|
||||
|
||||
|
||||
|
||||
# Widets' callback functions
|
||||
|
||||
# Run selector callback
|
||||
|
||||
def update_run(self, attrname, old, new):
|
||||
|
||||
filterby = self.widgets["filterby_radio"].labels[
|
||||
@ -260,8 +254,8 @@ class InspectRuns:
|
||||
# anyway)
|
||||
self.update_filter("", "", old_value)
|
||||
|
||||
|
||||
# "Group by" radio
|
||||
|
||||
def update_groupby(self, attrname, old, new):
|
||||
|
||||
# Update "Filter by" radio list
|
||||
@ -269,7 +263,6 @@ class InspectRuns:
|
||||
del filterby_list[self.widgets["groupby_radio"].active]
|
||||
self.widgets["filterby_radio"].labels = filterby_list
|
||||
|
||||
|
||||
filterby = self.widgets["filterby_radio"].labels[
|
||||
self.widgets["filterby_radio"].active
|
||||
]
|
||||
@ -292,8 +285,8 @@ class InspectRuns:
|
||||
# anyway)
|
||||
self.update_filter("", "", old_value)
|
||||
|
||||
|
||||
# "Filter by" radio
|
||||
|
||||
def update_filterby(self, attrname, old, new):
|
||||
|
||||
filterby = self.widgets["filterby_radio"].labels[
|
||||
@ -318,20 +311,18 @@ class InspectRuns:
|
||||
# anyway)
|
||||
self.update_filter("", "", old_value)
|
||||
|
||||
|
||||
# Filter selector callback
|
||||
|
||||
def update_filter(self, attrname, old, new):
|
||||
self.update_plots()
|
||||
|
||||
|
||||
# Filter outliers checkbox callback
|
||||
|
||||
def update_outliers_filtering(self, attrname, old, new):
|
||||
# The status (checked/unchecked) of the checkbox is also verified inside
|
||||
# self.update_plots(), so calling this function is enough
|
||||
self.update_plots()
|
||||
|
||||
|
||||
|
||||
# Bokeh setup functions
|
||||
# (for both variable and backend selection at once)
|
||||
|
||||
@ -339,7 +330,6 @@ class InspectRuns:
|
||||
|
||||
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
|
||||
|
||||
|
||||
# Tooltips and formatters
|
||||
|
||||
dotplot_tooltips = [
|
||||
@ -348,18 +338,20 @@ class InspectRuns:
|
||||
("Number of samples (tests)", "@nsamples")
|
||||
]
|
||||
dotplot_formatters = {
|
||||
"@mu" : "printf"
|
||||
"@mu": "printf"
|
||||
}
|
||||
|
||||
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
|
||||
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("sigma")
|
||||
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
|
||||
"sigma")
|
||||
|
||||
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
|
||||
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s10")
|
||||
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
|
||||
"s10")
|
||||
|
||||
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
|
||||
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s2")
|
||||
|
||||
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
|
||||
"s2")
|
||||
|
||||
# Plots
|
||||
|
||||
@ -372,12 +364,11 @@ class InspectRuns:
|
||||
)
|
||||
plot.fill_dotplot(
|
||||
self.plots["mu_inspect"], self.sources["mu_source"], "mu",
|
||||
tooltips = dotplot_tooltips,
|
||||
tooltips_formatters = dotplot_formatters
|
||||
tooltips=dotplot_tooltips,
|
||||
tooltips_formatters=dotplot_formatters
|
||||
)
|
||||
self.doc.add_root(self.plots["mu_inspect"])
|
||||
|
||||
|
||||
# Sigma plot
|
||||
self.plots["sigma_inspect"] = figure(
|
||||
name="sigma_inspect",
|
||||
@ -386,13 +377,13 @@ class InspectRuns:
|
||||
tools=tools, sizing_mode="scale_width"
|
||||
)
|
||||
plot.fill_boxplot(
|
||||
self.plots["sigma_inspect"], self.sources["sigma_source"], prefix="sigma",
|
||||
tooltips = sigma_boxplot_tooltips,
|
||||
tooltips_formatters = sigma_boxplot_tooltips_formatters
|
||||
)
|
||||
self.plots["sigma_inspect"],
|
||||
self.sources["sigma_source"],
|
||||
prefix="sigma",
|
||||
tooltips=sigma_boxplot_tooltips,
|
||||
tooltips_formatters=sigma_boxplot_tooltips_formatters)
|
||||
self.doc.add_root(self.plots["sigma_inspect"])
|
||||
|
||||
|
||||
# s plots
|
||||
self.plots["s10_inspect"] = figure(
|
||||
name="s10_inspect",
|
||||
@ -401,11 +392,14 @@ class InspectRuns:
|
||||
tools=tools, sizing_mode='scale_width'
|
||||
)
|
||||
plot.fill_boxplot(
|
||||
self.plots["s10_inspect"], self.sources["s10_source"], prefix="s10",
|
||||
tooltips = s10_boxplot_tooltips,
|
||||
tooltips_formatters = s10_boxplot_tooltips_formatters
|
||||
)
|
||||
s10_tab_inspect = Panel(child=self.plots["s10_inspect"], title="Base 10")
|
||||
self.plots["s10_inspect"],
|
||||
self.sources["s10_source"],
|
||||
prefix="s10",
|
||||
tooltips=s10_boxplot_tooltips,
|
||||
tooltips_formatters=s10_boxplot_tooltips_formatters)
|
||||
s10_tab_inspect = Panel(
|
||||
child=self.plots["s10_inspect"],
|
||||
title="Base 10")
|
||||
|
||||
self.plots["s2_inspect"] = figure(
|
||||
name="s2_inspect",
|
||||
@ -415,19 +409,17 @@ class InspectRuns:
|
||||
)
|
||||
plot.fill_boxplot(
|
||||
self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2",
|
||||
tooltips = s2_boxplot_tooltips,
|
||||
tooltips_formatters = s2_boxplot_tooltips_formatters
|
||||
tooltips=s2_boxplot_tooltips,
|
||||
tooltips_formatters=s2_boxplot_tooltips_formatters
|
||||
)
|
||||
s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2")
|
||||
|
||||
s_tabs_inspect = Tabs(
|
||||
name = "s_tabs_inspect",
|
||||
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location = "below"
|
||||
name="s_tabs_inspect",
|
||||
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location="below"
|
||||
)
|
||||
self.doc.add_root(s_tabs_inspect)
|
||||
|
||||
|
||||
|
||||
def setup_widgets(self):
|
||||
|
||||
# Generation of selectable items
|
||||
@ -445,7 +437,6 @@ class InspectRuns:
|
||||
"Tests": "test"
|
||||
}
|
||||
|
||||
|
||||
# Run selection
|
||||
|
||||
# Contains all options strings
|
||||
@ -457,8 +448,7 @@ class InspectRuns:
|
||||
# This contains only entries matching the run
|
||||
self.run_data = self.data[self.data["timestamp"] == self.current_run]
|
||||
|
||||
|
||||
change_run_callback_js="updateRunMetadata(cb_obj.value);"
|
||||
change_run_callback_js = "updateRunMetadata(cb_obj.value);"
|
||||
|
||||
self.widgets["select_run"] = Select(
|
||||
name="select_run", title="Run :",
|
||||
@ -467,7 +457,7 @@ class InspectRuns:
|
||||
self.doc.add_root(self.widgets["select_run"])
|
||||
self.widgets["select_run"].on_change("value", self.update_run)
|
||||
self.widgets["select_run"].js_on_change("value", CustomJS(
|
||||
code = change_run_callback_js,
|
||||
code=change_run_callback_js,
|
||||
args=(dict(
|
||||
metadata=helper.metadata_to_dict(
|
||||
helper.get_metadata(self.metadata, self.current_run)
|
||||
@ -475,7 +465,6 @@ class InspectRuns:
|
||||
))
|
||||
))
|
||||
|
||||
|
||||
# Factors selection
|
||||
|
||||
# "Group by" radio
|
||||
@ -491,7 +480,6 @@ class InspectRuns:
|
||||
self.update_groupby
|
||||
)
|
||||
|
||||
|
||||
# "Filter by" radio
|
||||
# Get all possible factors, and remove the one selected in "Group by"
|
||||
filterby_list = list(self.factors_dict.keys())
|
||||
@ -509,7 +497,6 @@ class InspectRuns:
|
||||
self.update_filterby
|
||||
)
|
||||
|
||||
|
||||
# Filter selector
|
||||
|
||||
filterby = self.widgets["filterby_radio"].labels[
|
||||
@ -530,28 +517,24 @@ class InspectRuns:
|
||||
self.widgets["select_filter"]\
|
||||
.on_change("value", self.update_filter)
|
||||
|
||||
|
||||
# Toggle for outliers filtering
|
||||
|
||||
self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
|
||||
name="outliers_filtering_inspect",
|
||||
labels=["Filter outliers"], active = []
|
||||
labels=["Filter outliers"], active=[]
|
||||
)
|
||||
self.doc.add_root(self.widgets["outliers_filtering_inspect"])
|
||||
self.widgets["outliers_filtering_inspect"]\
|
||||
.on_change("active", self.update_outliers_filtering)
|
||||
|
||||
|
||||
|
||||
# Communication methods
|
||||
# (to send/receive messages to/from master)
|
||||
|
||||
# When received, switch to the run_name in parameter
|
||||
|
||||
def switch_view(self, run_name):
|
||||
self.widgets["select_run"].value = run_name
|
||||
|
||||
|
||||
|
||||
# Constructor
|
||||
|
||||
def __init__(self, master, doc, data, metadata):
|
||||
@ -562,11 +545,10 @@ class InspectRuns:
|
||||
self.data = data
|
||||
self.metadata = metadata
|
||||
|
||||
|
||||
self.sources = {
|
||||
"mu_source": ColumnDataSource(data={}),
|
||||
"sigma_source": ColumnDataSource(data={}),
|
||||
"s10_source" :ColumnDataSource(data={}),
|
||||
"s10_source": ColumnDataSource(data={}),
|
||||
"s2_source": ColumnDataSource(data={})
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Look for and read all the run files in the current directory (ending with
|
||||
# .vfcrun.hd5), and lanch a Bokeh server for the visualization of this data.
|
||||
# .vfcrunh5), and lanch a Bokeh server for the visualization of this data.
|
||||
|
||||
import os
|
||||
import sys
|
||||
@ -14,18 +14,16 @@ import compare_runs
|
||||
import inspect_runs
|
||||
import helper
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
|
||||
# Read vfcrun files, and aggregate them in one dataset
|
||||
|
||||
# Read vfcrun files, and aggregate them in one dataset
|
||||
|
||||
run_files = [ f for f in os.listdir(".") if f.endswith(".vfcrun.hd5") ]
|
||||
run_files = [f for f in os.listdir(".") if f.endswith(".vfcrun.h5")]
|
||||
|
||||
if len(run_files) == 0:
|
||||
print(
|
||||
"Warning [vfc_ci]: Could not find any vfcrun files in the directory. " \
|
||||
"This will result in server errors and prevent you from viewing the report."
|
||||
)
|
||||
"Warning [vfc_ci]: Could not find any vfcrun files in the directory. "
|
||||
"This will result in server errors and prevent you from viewing the report.")
|
||||
|
||||
# These are arrays of Pandas dataframes for now
|
||||
metadata = []
|
||||
@ -55,15 +53,14 @@ metadata["date"] = metadata.index.to_series().map(
)


################################################################################
##########################################################################


curdoc().title = "Verificarlo Report"


# Read server arguments
# (this is quite easy because Bokeh server is called through a wrapper, so
# we know exactly what the arguments might be)

# Read server arguments
# (this is quite easy because Bokeh server is called through a wrapper, so
# we know exactly what the arguments might be)

git_repo_linked = False
commit_link = ""

@ -83,7 +80,6 @@ for i in range(1, len(sys.argv)):
        address = sys.argv[i + 2]
        url = ""


        # Here, address is either the remote URL or the path to the local Git
        # repo (depending on the method)

@ -99,12 +95,11 @@ for i in range(1, len(sys.argv)):

        else:
            raise ValueError(
                "Error [vfc_ci]: The specified method to get the Git " \
                "repository is invalid. Are you calling Bokeh directly " \
                "Error [vfc_ci]: The specified method to get the Git "
                "repository is invalid. Are you calling Bokeh directly "
                "instead of using the Verificarlo wrapper ?"
            )


        # At this point, "url" should be set correctly, we can get the repo's
        # URL and name, after making sure we're on a Git URL

@ -113,7 +108,7 @@ for i in range(1, len(sys.argv)):
        path = parsed_url.path.split("/")
        if len(path) < 3:
            raise ValueError(
                "Error [vfc_ci]: The found URL doesn't seem to be pointing " \
                "Error [vfc_ci]: The found URL doesn't seem to be pointing "
                "to a Git repository (path is too short)"
            )

@ -122,7 +117,6 @@ for i in range(1, len(sys.argv)):
        curdoc().template_variables["repo_url"] = url
        curdoc().template_variables["repo_name"] = repo_name


        # We should have a "github.com" or a "*gitlab*" URL

        if parsed_url.netloc == "github.com":

@ -148,8 +142,6 @@ for i in range(1, len(sys.argv)):

        git_repo_linked = True


    # Look for a logo URL
    # If a logo URL is specified, it will be included in the report's header
    if sys.argv[i] == "logo":

@ -162,10 +154,9 @@ curdoc().template_variables["git_repo_linked"] = git_repo_linked
curdoc().template_variables["has_logo"] = has_logo

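For context on the argument scan above: the serve wrapper launches "bokeh serve" with positional --args such as "git directory <path>", "git url <url>" and "logo <url>", which is why the report can simply look for those fixed keywords in sys.argv. A minimal re-creation of that scan (simplified; the variable names here are illustrative, not copied from the file):

import sys

git_directory, git_url, logo_url = None, None, None
for i in range(1, len(sys.argv)):
    if sys.argv[i] == "git":
        method = sys.argv[i + 1]    # "directory" or "url"
        address = sys.argv[i + 2]
        if method == "directory":
            git_directory = address
        elif method == "url":
            git_url = address
    if sys.argv[i] == "logo":
        logo_url = sys.argv[i + 1]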
################################################################################
##########################################################################


# Setup report views
# Setup report views

# Define a ViewsMaster class to allow two-ways communication between views.
# This approach by classes allows us to have separate scopes for each view and

@ -179,8 +170,7 @@ class ViewsMaster:
    def go_to_inspect(self, run_name):
        self.inspect.switch_view(run_name)


    #Constructor
    # Constructor

    def __init__(self, data, metadata, git_repo_linked, commit_link):

@ -190,28 +180,29 @@ class ViewsMaster:
        self.commit_link = commit_link

        # Pass metadata to the template as a JSON string
        curdoc().template_variables["metadata"] = self.metadata.to_json(orient="index")
        curdoc().template_variables["metadata"] = self.metadata.to_json(
            orient="index")

        # Runs comparison
        self.compare = compare_runs.CompareRuns(
            master = self,
            doc = curdoc(),
            data = data,
            metadata = metadata,
            master=self,
            doc=curdoc(),
            data=data,
            metadata=metadata,
        )

        # Runs inspection
        self.inspect = inspect_runs.InspectRuns(
            master = self,
            doc = curdoc(),
            data = data,
            metadata = metadata,
            master=self,
            doc=curdoc(),
            data=data,
            metadata=metadata,
        )


views_master = ViewsMaster(
    data = data,
    metadata = metadata,
    git_repo_linked = git_repo_linked,
    commit_link = commit_link
    data=data,
    metadata=metadata,
    git_repo_linked=git_repo_linked,
    commit_link=commit_link
)

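To make the ViewsMaster pattern above concrete: each view keeps a reference to the master and never talks to another view directly, so the comparison view can request a jump to the inspection view through a single call. A minimal sketch of the view side (the class and callback names below are illustrative; only go_to_inspect() and switch_view() appear in the diff):

class CompareViewSketch:
    def __init__(self, master):
        self.master = master    # back-reference set up by ViewsMaster

    def on_run_clicked(self, run_name):
        # Illustrative callback: delegate navigation to the master, which
        # forwards it to the inspection view via switch_view(run_name).
        self.master.go_to_inspect(run_name)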
@ -15,21 +15,19 @@ def fill_dotplot(
):

    # (Optional) Tooltip and tooltip formatters
    if tooltips != None:
        hover = HoverTool(tooltips = tooltips, mode="vline", names=["circle"])
    if tooltips is not None:
        hover = HoverTool(tooltips=tooltips, mode="vline", names=["circle"])

        if tooltips_formatters != None:
        if tooltips_formatters is not None:
            hover.formatters = tooltips_formatters

        plot.add_tools(hover)


    # (Optional) Add TapTool (for JS tap callback)
    if js_tap_callback != None:
    if js_tap_callback is not None:
        tap = TapTool(callback=CustomJS(code=js_tap_callback))
        plot.add_tools(tap)


    # (Optional) Add segment to represent a lower bound
    if lower_bound:
        lower_segment = plot.segment(

@ -38,24 +36,20 @@ def fill_dotplot(
            source=source, line_color="black"
        )


    # Draw dots (actually Bokeh circles)
    circle = plot.circle(
        name="circle",
        x="%s_x" % data_field, y=data_field, source=source, size=12
    )


    # (Optional) Draw lines between dots
    if lines:
        line = plot.line(x="%s_x" % data_field, y=data_field, source=source)


    # (Optional) Add server tap callback
    if server_tap_callback != None:
    if server_tap_callback is not None:
        circle.data_source.selected.on_change("indices", server_tap_callback)


    # Plot appearance
    plot.xgrid.grid_line_color = None
    plot.ygrid.grid_line_color = None

@ -64,33 +58,30 @@ def fill_dotplot(
    plot.yaxis[0].formatter.power_limit_low = 0
    plot.yaxis[0].formatter.precision = 3

    plot.xaxis[0].major_label_orientation = pi/8

    plot.xaxis[0].major_label_orientation = pi / 8

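As a usage illustration only (the full positional signature of fill_dotplot is not visible in this hunk, so the data_field argument and lines keyword below are inferred from the function body): the source is expected to carry a "<field>_x" column for the categorical x axis and a "<field>" column for the values.

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

# Hypothetical caller of fill_dotplot(); the column naming follows the
# "%s_x" % data_field convention used in the body above.
source = ColumnDataSource({
    "mu_x": ["run 1", "run 2", "run 3"],
    "mu": [1.02, 0.98, 1.01],
})
plot = figure(x_range=source.data["mu_x"], title="mu over runs")
fill_dotplot(plot, source, "mu", lines=True)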
def fill_boxplot(
    plot, source,
    prefix="",
    tooltips=None, tooltips_formatters=None,
    js_tap_callback=None, server_tap_callback=None,
    js_tap_callback=None, server_tap_callback=None
):

    # (Optional) Tooltip and tooltip formatters
    if tooltips != None:
        hover = HoverTool(tooltips = tooltips, mode="vline", names=["full_box"])
    if tooltips is not None:
        hover = HoverTool(tooltips=tooltips, mode="vline", names=["full_box"])

        if tooltips_formatters != None:
        if tooltips_formatters is not None:
            hover.formatters = tooltips_formatters

        plot.add_tools(hover)


    # (Optional) Add TapTool (for JS tap callback)
    if js_tap_callback != None:
    if js_tap_callback is not None:
        tap = TapTool(callback=CustomJS(code=js_tap_callback))
        plot.add_tools(tap)


    # Draw boxes (the prefix argument modifies the fields of ColumnDataSource
    # that are used)

@ -128,18 +119,18 @@ def fill_boxplot(
        color="black"
    )


    # (Optional) Add server tap callback
    if server_tap_callback != None:
    if server_tap_callback is not None:
        top_stem.data_source.selected.on_change("indices", server_tap_callback)
        bottom_stem.data_source.selected.on_change("indices", server_tap_callback)
        bottom_stem.data_source.selected.on_change(
            "indices", server_tap_callback)

        full_box.data_source.selected.on_change("indices", server_tap_callback)
        bottom_box.data_source.selected.on_change("indices", server_tap_callback)
        bottom_box.data_source.selected.on_change(
            "indices", server_tap_callback)

        mu_dot.data_source.selected.on_change("indices", server_tap_callback)


    # Plot appearance
    plot.xgrid.grid_line_color = None
    plot.ygrid.grid_line_color = None

@ -148,4 +139,4 @@ def fill_boxplot(
    plot.yaxis[0].formatter.power_limit_low = 0
    plot.yaxis[0].formatter.precision = 3

    plot.xaxis[0].major_label_orientation = pi/8
    plot.xaxis[0].major_label_orientation = pi / 8

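Most of the remaining churn in both plotting helpers is the switch from "!= None" to "is not None". The two are not strictly equivalent: equality with None goes through __eq__/__ne__, which a custom class can override, while an identity check cannot be fooled, which is why PEP 8 recommends it. A contrived illustration (this class is made up for the example):

class AlwaysEqual:
    def __eq__(self, other):
        # Pathological equality: compares equal to everything, including None.
        return True

obj = AlwaysEqual()
print(obj != None)        # False -- __ne__ is derived from the overridden __eq__
print(obj is not None)    # True  -- the identity check is unaffected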
@ -291,19 +291,19 @@
        <!-- PLOTS -->
        <div class="column is-9">
            <h3 class="title is-3">Plots</h3>
            <div class="card plot-card">
            <div class="card plot-card" style="z-index: 3;">
                {{ embed(roots.s_tabs_inspect) }}
            </div>

            <br>

            <div class="card plot-card">
            <div class="card plot-card" style="z-index: 2;">
                {{ embed(roots.sigma_inspect) }}
            </div>

            <br>

            <div class="card plot-card">
            <div class="card plot-card" style="z-index: 1;">
                {{ embed(roots.mu_inspect) }}
            </div>
        </div>

@ -325,6 +325,8 @@
    </div>



    <!-- JAVASCRIPT -->
    <script>

        // Listen to clicks on breadcrumb (for responsive header)

@ -29,7 +29,7 @@ run_verificarlo_tests:
    - git fetch --all
    - git checkout -b {{ci_branch}} origin/{{ci_branch}}
    - mkdir -p vfcruns
    - mv *.vfcrun.hd5 vfcruns
    - mv *.vfcrun.h5 vfcruns
    - git add vfcruns/*
    - git commit -m "[auto] New test results for commit ${git_hash}"
    - git push

@ -39,4 +39,4 @@ run_verificarlo_tests:

  artifacts:
    paths:
      - "*.vfcraw.hd5"
      - "*.vfcraw.h5"

@ -26,7 +26,7 @@ jobs:
      - name: Install Python requirements
        run: |
          pip install numpy scipy pandas bokeh jinja2 tables GitPython

          apt update
          apt install wget
          wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages

@ -45,7 +45,7 @@ jobs:

          git checkout {{ci_branch}}
          mkdir -p vfcruns
          mv *.vfcrun.hd5 vfcruns
          mv *.vfcrun.h5 vfcruns
          git add vfcruns/*
          git commit -m "[auto] New test results for commit ${git_hash}"
          git push

@ -54,4 +54,4 @@ jobs:
        uses: actions/upload-artifact@v2
        with:
          {% raw %}name: ${{github.sha}}.vfcraw{% endraw %}
          path: ./*.vfcraw.hd5
          path: ./*.vfcraw.h5

32
vfc_ci
32
vfc_ci

@ -10,9 +10,10 @@

import argparse

################################################################################
##########################################################################

# Parameters validation helpers

# Parameters validation helpers

def is_port(string):
    value = int(string)

@ -31,8 +32,7 @@ def is_directory(string):
    return string


################################################################################

##########################################################################

# Subcommand decorator

@ -41,6 +41,7 @@ cli = argparse.ArgumentParser(
)
subparsers = cli.add_subparsers(dest="subcommand")


def subcommand(description="", args=[], parent=subparsers):
    def decorator(func):
        parser = parent.add_parser(func.__name__, description=description)

@ -54,15 +55,13 @@ def argument(*name_or_flags, **kwargs):
    return ([*name_or_flags], kwargs)

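To show how the subcommand()/argument() pair above is meant to be consumed, here is a made-up subcommand (not part of vfc_ci): each decorated function becomes an argparse sub-parser named after the function, and the (flags, kwargs) tuples returned by argument() are presumably forwarded to parser.add_argument() inside the decorator.

# Illustrative example only -- "greet" is not a real vfc_ci subcommand.
@subcommand(
    description="Say hello.",
    args=[
        argument("-n", "--name", default="world", help="Who to greet."),
    ],
)
def greet(args):
    print("Hello, %s!" % args.name)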
################################################################################

##########################################################################

# "setup" subcommand

@subcommand(
    description="Create an automated workflow to execute Verificarlo tests.",
    args = [
    args=[
        argument(
            "git_host",
            help="""

@ -76,13 +75,12 @@ def setup(args):
    import ci.setup
    ci.setup.setup(args.git_host)



# "test" subcommand


@subcommand(
    description="Execute predefined Verificarlo tests and save their results.",
    args = [
    args=[
        argument(
            "-g", "--is-git-commit",
            help="""

@ -112,15 +110,14 @@ def test(args):
    import ci.test
    ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run)



# "serve" subcommand


@subcommand(
    description="""
    Start a server to visualize Verificarlo test results.
    """,
    args = [
    args=[
        argument(
            "-s", "--show",
            help="""

@ -174,9 +171,9 @@ def test(args):
def serve(args):

    # git_directory and git_url are supposed to be exclusive
    if args.git_directory != None and args.git_url != None:
    if args.git_directory is not None and args.git_url is not None:
        raise argparse.ArgumentTypeError(
            "\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "\
            "\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "
            "mutually exclusive. Please make sure to use at most one of them."
        )

@ -191,12 +188,9 @@ def serve(args):
    )


###############################################################################


# Main command group and entry point

if __name__ == "__main__":
    args = cli.parse_args()
    if args.subcommand is None: