Update vfc_ci code

This commit is contained in:
Aurélien Delval 2021-05-03 15:47:44 +02:00
parent d81777e347
commit 44f0fc1f51
20 changed files with 398 additions and 477 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -3,24 +3,24 @@
 import os

 def serve(show, git_directory, git_url, port, allow_origin, logo_url):
     # Prepare arguments
     show = "--show" if show else ""

     git = ""
-    if git_directory != None:
+    if git_directory is not None:
         git = "git directory %s" % git_directory
-    if git_url != None:
+    if git_url is not None:
         git = "git url %s" % git_url

     logo = ""
-    if logo_url != None:
+    if logo_url is not None:
         logo = "logo %s" % logo_url

     dirname = os.path.dirname(__file__)

     # Call the "bokeh serve" command on the system
     command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
         % (dirname, show, allow_origin, port, port, git, logo)
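
As an illustration (not part of the commit), the assembled command for hypothetical arguments would look like this; the install path and port below are made up for the example:

    # Hypothetical values, for illustration only
    dirname = "/opt/vfc_ci"
    show = "--show"
    allow_origin, port = "localhost", 8080
    git = "git url https://github.com/user/repo.git"
    logo = ""

    command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
        % (dirname, show, allow_origin, port, port, git, logo)
    # => bokeh serve /opt/vfc_ci/vfc_ci_report --show --allow-websocket-origin=localhost:8080
    #    --port 8080 --args git url https://github.com/user/repo.git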

View File

@@ -6,11 +6,11 @@ import sys
 import os
 from jinja2 import Environment, FileSystemLoader

-################################################################################
+##########################################################################
 # Helper functions

 def gen_readme(dev_branch, ci_branch):
     # Init template loader
@@ -26,17 +26,16 @@ def gen_readme(dev_branch, ci_branch):
         fh.write(render)

 def gen_workflow(git_host, dev_branch, ci_branch, repo):
     # Init template loader
     path = os.path.dirname(os.path.abspath(__file__))
     env = Environment(loader=FileSystemLoader(path))

     if git_host == "github":
         # Load template
-        template = env.get_template("workflow_templates/vfc_test_workflow.j2.yml")
+        template = env.get_template(
+            "workflow_templates/vfc_test_workflow.j2.yml")

         # Render it
         render = template.render(dev_branch=dev_branch, ci_branch=ci_branch)
@@ -47,13 +46,14 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
         with open(filename, "w") as fh:
             fh.write(render)

     if git_host == "gitlab":
         template = env.get_template("workflow_templates/gitlab-ci.j2.yml")

         # Ask for the user who will run the jobs (Gitlab specific)
-        username = input("[vfc_ci] Enter the name of the user who will run the CI jobs:")
-        email = input("[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
+        username = input(
+            "[vfc_ci] Enter the name of the user who will run the CI jobs:")
+        email = input(
+            "[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")

         remote_url = repo.remotes[0].config_reader.get("url")
         remote_url = remote_url.replace("http://", "")
@@ -72,12 +72,10 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
             fh.write(render)

-################################################################################
-################################################################################
+##########################################################################

 def setup(git_host):
     # Init repo and make sure that the workflow setup is possible
     repo = git.Repo(".")
@@ -91,8 +89,9 @@ def setup(git_host):
     dev_branch_name = str(dev_branch)
     dev_remote = dev_branch.tracking_branch()

-    # Make sure that the active branch (on which to setup the workflow) has a remote
-    assert(dev_remote != None), "Error [vfc_ci]: The current branch doesn't " \
+    # Make sure that the active branch (on which to setup the workflow) has a
+    # remote
+    assert(dev_remote is not None), "Error [vfc_ci]: The current branch doesn't " \
         "have a remote."

     # Make sure that we are not behind the remote (so we can push safely later)
@@ -101,8 +100,6 @@ def setup(git_host):
     assert(commits_behind == []), "Error [vfc_ci]: The local branch seems " \
         "to be at least one commit behind remote."

     # Commit the workflow on the current (dev) branch
     ci_branch_name = "vfc_ci_%s" % dev_branch_name
@@ -111,8 +108,6 @@ def setup(git_host):
     repo.index.commit("[auto] Set up Verificarlo CI on this branch")
     repo.remote(name="origin").push()

     # Create the CI branch (orphan branch with a readme on it)
     # (see : https://github.com/gitpython-developers/GitPython/issues/615)
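
The orphan-branch creation referenced above is not shown in this hunk; a minimal sketch of the usual GitPython workaround (assuming a repo object and branch names as in the surrounding code) looks like this:

    from git import Repo

    repo = Repo(".")
    ci_branch_name = "vfc_ci_" + str(repo.active_branch)

    # Create an orphan branch by shelling out through repo.git
    # (GitPython has no high-level API for orphan branches, hence issue #615)
    repo.git.checkout("--orphan", ci_branch_name)
    repo.git.rm("-r", "--cached", ".")   # start from an empty index
    # ... add a README, commit, push, then check the dev branch back out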
@@ -133,23 +128,19 @@ def setup(git_host):
     # Force checkout back to the original (dev) branch
     repo.git.checkout(dev_branch_name, force=True)

     # Print termination messages
     print(
-        "Info [vfc_ci]: A Verificarlo CI workflow has been setup on " \
+        "Info [vfc_ci]: A Verificarlo CI workflow has been setup on "
         "%s." % dev_branch_name
     )
     print(
-        "Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on " \
-        "this branch. You can also perform a \"vfc_ci test\" dry run before "\
-        "pushing other commits."
-    )
+        "Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on "
+        "this branch. You can also perform a \"vfc_ci test\" dry run before "
+        "pushing other commits.")

     if git_host == "gitlab":
         print(
-            "Info [vfc_ci]: Since you are using GitLab, make sure that you " \
-            "have created an access token for the user you specified (registered "\
-            "as a variable called \"CI_PUSH_TOKEN\" in your repository)."
-        )
+            "Info [vfc_ci]: Since you are using GitLab, make sure that you "
+            "have created an access token for the user you specified (registered "
+            "as a variable called \"CI_PUSH_TOKEN\" in your repository).")

View File

@@ -1,6 +1,10 @@
 # This script reads the vfc_tests_config.json file and executes tests accordingly
-# It will also generate a ... .vfcrun.hd5 file with the results of the run
+# It will also generate a ... .vfcrun.h5 file with the results of the run

+import sigdigits as sd
+import scipy.stats
+import numpy as np
+import pandas as pd

 import os
 import json
@@ -12,19 +16,13 @@ import time
 import pickle
 pickle.HIGHEST_PROTOCOL = 4

-import pandas as pd
-import numpy as np
-import scipy.stats
-import sigdigits as sd

 # Magic numbers
 min_pvalue = 0.05
 max_zscore = 3

-################################################################################
+##########################################################################
 # Helper functions
@@ -36,7 +34,7 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
     except FileNotFoundError:
         print(
-            "Warning [vfc_ci]: Probes not found, your code might have crashed " \
+            "Warning [vfc_ci]: Probes not found, your code might have crashed "
            "or you might have forgotten to call vfc_dump_probes"
         )
         warnings.append(execution_data)
@@ -46,7 +44,7 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
     except Exception:
         print(
-            "Warning [vfc_ci]: Your probes could not be read for some unknown " \
+            "Warning [vfc_ci]: Your probes could not be read for some unknown "
            "reason"
         )
         warnings.append(execution_data)
@@ -56,12 +54,11 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
     if len(results) == 0:
         print(
-            "Warning [vfc_ci]: Probes empty, it looks like you have dumped " \
+            "Warning [vfc_ci]: Probes empty, it looks like you have dumped "
            "them without calling vfc_put_probe"
         )
         warnings.append(execution_data)

     # Once the CSV has been opened and validated, return its content
     results["value"] = results["value"].apply(lambda x: float.fromhex(x))
     results.rename(columns={"value": "values"}, inplace=True)
@@ -75,6 +72,9 @@ def read_probes_csv(filepath, backend, warnings, execution_data):

 def significant_digits(x):
+    # If the null hypothesis is rejected, call sigdigits with the General
+    # formula:
+    if x.pvalue < min_pvalue:
        # In a pandas DF, "values" actually refers to the array of columns, and
        # not the column named "values"
        distribution = x.values[3]
@@ -83,41 +83,31 @@ def significant_digits(x):
        # The distribution's empirical average will be used as the reference
        mu = np.array([x.mu])

-       # If the null hypothesis is rejected, call sigdigits with General mode:
-       if x.pvalue < min_pvalue:
-           method = sd.Method.General
        s = sd.significant_digits(
            distribution,
            mu,
-           precision=sd.Precision.Absolute,
-           method=method
+           precision=sd.Precision.Relative,
+           method=sd.Method.General,
+           probability=0.9,
+           confidence=0.95
        )
-       # s is returned inside a list
-
-       # Else, manually compute sMCA which is equivalent to a 66% confidence interval
-       else:
-           method = sd.Method.CNH
-           s = sd.significant_digits(
-               distribution,
-               mu,
-               precision=sd.Precision.Absolute,
-               method=method,
-               probability=0.66,
-               confidence=0.66,
-           )
-       # s is returned as a size 1 list
        return s[0]

+   # Else, manually compute sMCA (Stott-Parker formula)
+   else:
+       return -np.log2(np.absolute(x.sigma / x.mu))
 def significant_digits_lower_bound(x):
     # If the null hypothesis is rejected, no lower bound
     if x.pvalue < min_pvalue:
         return x.s2

-    # Else, the lower bound will be a 95% confidence interval
+    # Else, the lower bound will be computed with p= .9 alpha-1=.95
     else:
         distribution = x.values[3]
         distribution = distribution.reshape(len(distribution), 1)
@@ -126,16 +116,17 @@ def significant_digits_lower_bound(x):
         s = sd.significant_digits(
             distribution,
             mu,
-            precision=sd.Precision.Absolute,
+            precision=sd.Precision.Relative,
             method=sd.Method.CNH,
+            probability=0.9,
+            confidence=0.95
         )
         return s[0]
-################################################################################
+##########################################################################
 # Main functions

@@ -154,7 +145,6 @@ def read_config():
     return json.loads(data)

 # Set up metadata
 def generate_metadata(is_git_commit):
@@ -167,7 +157,6 @@ def generate_metadata(is_git_commit):
         "message": ""
     }

     if is_git_commit:
         print("Fetching metadata from last commit...")
         from git import Repo
@@ -185,7 +174,6 @@ def generate_metadata(is_git_commit):
     return metadata

 # Execute tests and collect results in a Pandas dataframe (+ dataprocessing)
 def run_tests(config):
@@ -204,10 +192,10 @@ def run_tests(config):
     # not get any data
     warnings = []

     # Tests execution loop
     for executable in config["executables"]:
-        print("Info [vfc_ci]: Running executable :", executable["executable"], "...")
+        print("Info [vfc_ci]: Running executable :",
+              executable["executable"], "...")

         parameters = ""
         if "parameters" in executable:
@@ -245,26 +233,23 @@ def run_tests(config):
                 n_files = n_files + 1

     # Clean CSV output files (by deleting the tmp folder)
     os.system("rm -rf .vfcruns.tmp")

     # Combine all separate executions in one dataframe
     data = pd.concat(data, sort=False, ignore_index=True)
     data = data.groupby(["test", "vfc_backend", "variable"])\
         .values.apply(list).reset_index()

     # Make sure we have some data to work on
     assert(len(data) != 0), "Error [vfc_ci]: No data have been generated " \
         "by your tests executions, aborting run without writing results file"

     return data, warnings
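
As a side note (not from the commit), the concat/groupby step above can be pictured on a tiny example; the column values are invented:

    import pandas as pd

    df = pd.DataFrame({
        "test": ["t1", "t1", "t1"],
        "vfc_backend": ["mca", "mca", "mca"],
        "variable": ["x", "x", "x"],
        "values": [0.1001, 0.0999, 0.1002],
    })
    # One row per (test, backend, variable), with all samples gathered in a list
    grouped = df.groupby(["test", "vfc_backend", "variable"])["values"] \
        .apply(list).reset_index()
    print(grouped["values"][0])   # [0.1001, 0.0999, 0.1002]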
 # Data processing
 def data_processing(data):
     data["values"] = data["values"].apply(lambda x: np.array(x).astype(float))
@@ -272,8 +257,8 @@ def data_processing(data):
     # Get empirical average, standard deviation and p-value
     data["mu"] = data["values"].apply(np.average)
     data["sigma"] = data["values"].apply(np.std)
-    data["pvalue"] = data["values"].apply(lambda x: scipy.stats.shapiro(x).pvalue)
+    data["pvalue"] = data["values"].apply(
+        lambda x: scipy.stats.shapiro(x).pvalue)

     # Significant digits
     data["s2"] = data.apply(significant_digits, axis=1)
@@ -281,8 +266,8 @@ def data_processing(data):
     # Lower bound of the confidence interval using the sigdigits module
     data["s2_lower_bound"] = data.apply(significant_digits_lower_bound, axis=1)
-    data["s10_lower_bound"] = data["s2_lower_bound"].apply(lambda x: sd.change_base(x, 10))
+    data["s10_lower_bound"] = data["s2_lower_bound"].apply(
+        lambda x: sd.change_base(x, 10))

     # Compute moments of the distribution
     # (including a new distribution obtained by filtering outliers)
@@ -297,13 +282,13 @@ def data_processing(data):
     data["nsamples"] = data["values"].apply(len)

 # Display all executions that resulted in a warning
 def show_warnings(warnings):
     if len(warnings) > 0:
         print(
-            "Warning [vfc_ci]: Some of your runs could not generate any data " \
+            "Warning [vfc_ci]: Some of your runs could not generate any data "
             "(for instance because your code crashed) and resulted in "
             "warnings. Here is the complete list :"
         )
@@ -316,9 +301,7 @@ def show_warnings(warnings):
             print(" Repetition: %s" % warnings[i]["repetition"])

-################################################################################
+##########################################################################
 # Entry point
@@ -334,54 +317,51 @@ def run(is_git_commit, export_raw_values, dry_run):
     data, warnings = run_tests(config)
     show_warnings(warnings)

     # Data processing
     print("Info [vfc_ci]: Processing data...")
     data_processing(data)

-    # Prepare data for export (by creating a proper index and linking run timestamp)
+    # Prepare data for export (by creating a proper index and linking run
+    # timestamp)
     data = data.set_index(["test", "variable", "vfc_backend"]).sort_index()
     data["timestamp"] = metadata["timestamp"]

-    filename = metadata["hash"] if is_git_commit else str(metadata["timestamp"])
+    filename = metadata["hash"] if is_git_commit else str(
+        metadata["timestamp"])

     # Prepare metadata for export
     metadata = pd.DataFrame.from_dict([metadata])
     metadata = metadata.set_index("timestamp")

     # NOTE : Exporting to HDF5 requires to install "tables" on the system

     # Export raw data if needed
     if export_raw_values and not dry_run:
-        data.to_hdf(filename + ".vfcraw.hd5", key="data")
-        metadata.to_hdf(filename + ".vfcraw.hd5", key="metadata")
+        data.to_hdf(filename + ".vfcraw.h5", key="data")
+        metadata.to_hdf(filename + ".vfcraw.h5", key="metadata")

     # Export data
     del data["values"]
     if not dry_run:
-        data.to_hdf(filename + ".vfcrun.hd5", key="data")
-        metadata.to_hdf(filename + ".vfcrun.hd5", key="metadata")
+        data.to_hdf(filename + ".vfcrun.h5", key="data")
+        metadata.to_hdf(filename + ".vfcrun.h5", key="metadata")
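
For reference (not part of the commit), a run file written this way can be read back with pandas, provided the optional "tables" dependency is installed; the file name below is hypothetical:

    import pandas as pd

    # Both the results and their metadata live in the same HDF5 file,
    # under the "data" and "metadata" keys used above
    data = pd.read_hdf("a1b2c3d.vfcrun.h5", key="data")
    metadata = pd.read_hdf("a1b2c3d.vfcrun.h5", key="metadata")
    print(data.index.names)   # test, variable, vfc_backend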
     # Print termination messages
     print(
-        "Info [vfc_ci]: The results have been successfully written to " \
-        "%s.vfcrun.hd5." \
+        "Info [vfc_ci]: The results have been successfully written to "
+        "%s.vfcrun.h5."
         % filename
     )

     if export_raw_values:
         print(
-            "Info [vfc_ci]: A file containing the raw values has also been " \
-            "created : %s.vfcraw.hd5."
+            "Info [vfc_ci]: A file containing the raw values has also been "
+            "created : %s.vfcraw.h5."
             % filename
         )

     if dry_run:
         print(
-            "Info [vfc_ci]: The dry run flag was enabled, so no files were " \
+            "Info [vfc_ci]: The dry run flag was enabled, so no files were "
             "actually created."
         )

View File

@@ -15,8 +15,7 @@ import helper
 import plot

-################################################################################
+##########################################################################

 class CompareRuns:
@ -44,43 +43,40 @@ class CompareRuns:
if n == 0 or n > len(timestamps): if n == 0 or n > len(timestamps):
n = len(timestamps) n = len(timestamps)
for i in range(0, n): for i in range(0, n):
# Get metadata associated to this run # Get metadata associated to this run
row_metadata = helper.get_metadata(self.metadata, timestamps[-i-1]) row_metadata = helper.get_metadata(
self.metadata, timestamps[-i - 1])
date = time.ctime(timestamps[-i - 1]) date = time.ctime(timestamps[-i - 1])
# Fill the x series # Fill the x series
str = row_metadata["name"] str = row_metadata["name"]
x_series.insert(0, helper.get_metadata(self.metadata, timestamps[-i-1])["name"]) x_series.insert(0, helper.get_metadata(
self.metadata, timestamps[-i - 1])["name"])
# Fill the metadata lists # Fill the metadata lists
x_metadata["date"].insert(0, date) x_metadata["date"].insert(0, date)
x_metadata["is_git_commit"].insert(0, row_metadata["is_git_commit"]) x_metadata["is_git_commit"].insert(
0, row_metadata["is_git_commit"])
x_metadata["hash"].insert(0, row_metadata["hash"]) x_metadata["hash"].insert(0, row_metadata["hash"])
x_metadata["author"].insert(0, row_metadata["author"]) x_metadata["author"].insert(0, row_metadata["author"])
x_metadata["message"].insert(0, row_metadata["message"]) x_metadata["message"].insert(0, row_metadata["message"])
return x_series, x_metadata return x_series, x_metadata
     # Plots update function
     def update_plots(self):
         # Select all data matching current test/var/backend
-        runs = self.data.loc[
-            [self.widgets["select_test"].value],
-            self.widgets["select_var"].value, self.widgets["select_backend"].value
-        ]
+        runs = self.data.loc[[self.widgets["select_test"].value],
+                             self.widgets["select_var"].value,
+                             self.widgets["select_backend"].value]
         timestamps = runs["timestamp"]
         x_series, x_metadata = self.gen_x_series(timestamps.sort_values())

         # Update source
         main_dict = runs.to_dict("series")
@@ -93,7 +89,6 @@ class CompareRuns:
         n = self.current_n_runs
         main_dict = {key: value[-n:] for key, value in main_dict.items()}

         # Generate ColumnDataSources for the 3 dotplots
         for stat in ["sigma", "s10", "s2"]:
             dict = {
@@ -111,18 +106,20 @@ class CompareRuns:
             }

             if stat == "s10" or stat == "s2":
-                dict["%s_lower_bound" % stat] = main_dict["%s_lower_bound" % stat]
+                dict["%s_lower_bound" %
+                     stat] = main_dict["%s_lower_bound" %
+                                       stat]

             # Filter outliers if the box is checked
             if len(self.widgets["outliers_filtering_compare"].active) > 0:
                 outliers = helper.detect_outliers(dict[stat])
                 dict[stat] = helper.remove_outliers(dict[stat], outliers)
-                dict["%s_x" % stat] = helper.remove_outliers(dict["%s_x" % stat], outliers)
+                dict["%s_x" % stat] = helper.remove_outliers(
+                    dict["%s_x" % stat], outliers)

             # Assign ColumnDataSource
             self.sources["%s_source" % stat].data = dict

         # Generate ColumnDataSource for the boxplot
         dict = {
             "is_git_commit": main_dict["is_git_commit"],
@@ -143,29 +140,37 @@ class CompareRuns:
             "nsamples": main_dict["nsamples"]
         }
         self.sources["boxplot_source"].data = dict

-        # Update x_ranges
-        helper.reset_x_range(self.plots["boxplot"], self.sources["boxplot_source"].data["x"])
-        helper.reset_x_range(self.plots["sigma_plot"], self.sources["sigma_source"].data["sigma_x"])
-        helper.reset_x_range(self.plots["s10_plot"], self.sources["s10_source"].data["s10_x"])
-        helper.reset_x_range(self.plots["s2_plot"], self.sources["s2_source"].data["s2_x"])
+        # Update x axis
+        helper.reset_x_range(
+            self.plots["boxplot"],
+            self.sources["boxplot_source"].data["x"]
+        )
+        helper.reset_x_range(
+            self.plots["sigma_plot"],
+            self.sources["sigma_source"].data["sigma_x"]
+        )
+        helper.reset_x_range(
+            self.plots["s10_plot"],
+            self.sources["s10_source"].data["s10_x"]
+        )
+        helper.reset_x_range(
+            self.plots["s2_plot"],
+            self.sources["s2_source"].data["s2_x"]
+        )
# Widgets' callback functions # Widgets' callback functions
def update_test(self, attrname, old, new): def update_test(self, attrname, old, new):
# If the value is updated by the CustomJS, self.widgets["select_var"].value # If the value is updated by the CustomJS, self.widgets["select_var"].value
# won't be updated, so we have to look for that case and assign it manually # won't be updated, so we have to look for that case and assign it
# manually
# "new" should be a list when updated by CustomJS # "new" should be a list when updated by CustomJS
if type(new) == list: if isinstance(new, list):
# If filtering removed all options, we might have an empty list # If filtering removed all options, we might have an empty list
# (in this case, we just skip the callback and do nothing) # (in this case, we just skip the callback and do nothing)
if len(new) > 0: if len(new) > 0:
@ -183,7 +188,6 @@ class CompareRuns:
.index.get_level_values("variable").drop_duplicates().tolist() .index.get_level_values("variable").drop_duplicates().tolist()
self.widgets["select_var"].options = self.vars self.widgets["select_var"].options = self.vars
# Reset var selection if old one is not available in new vars # Reset var selection if old one is not available in new vars
if self.widgets["select_var"].value not in self.vars: if self.widgets["select_var"].value not in self.vars:
self.widgets["select_var"].value = self.vars[0] self.widgets["select_var"].value = self.vars[0]
@ -194,14 +198,14 @@ class CompareRuns:
# anyway) # anyway)
self.update_var("", "", self.widgets["select_var"].value) self.update_var("", "", self.widgets["select_var"].value)
def update_var(self, attrname, old, new): def update_var(self, attrname, old, new):
# If the value is updated by the CustomJS, self.widgets["select_var"].value # If the value is updated by the CustomJS, self.widgets["select_var"].value
# won't be updated, so we have to look for that case and assign it manually # won't be updated, so we have to look for that case and assign it
# manually
# new should be a list when updated by CustomJS # new should be a list when updated by CustomJS
if type(new) == list: if isinstance(new, list):
new = new[0] new = new[0]
if new != self.widgets["select_var"].value: if new != self.widgets["select_var"].value:
@ -209,7 +213,6 @@ class CompareRuns:
self.widgets["select_var"].value = new self.widgets["select_var"].value = new
return return
# New list of available backends # New list of available backends
self.backends = self.data.loc[self.widgets["select_test"].value, self.widgets["select_var"].value]\ self.backends = self.data.loc[self.widgets["select_test"].value, self.widgets["select_var"].value]\
.index.get_level_values("vfc_backend").drop_duplicates().tolist() .index.get_level_values("vfc_backend").drop_duplicates().tolist()
@ -225,13 +228,11 @@ class CompareRuns:
# anyway) # anyway)
self.update_backend("", "", self.widgets["select_backend"].value) self.update_backend("", "", self.widgets["select_backend"].value)
def update_backend(self, attrname, old, new): def update_backend(self, attrname, old, new):
# Simply update plots, since no other data is affected # Simply update plots, since no other data is affected
self.update_plots() self.update_plots()
def update_n_runs(self, attrname, old, new): def update_n_runs(self, attrname, old, new):
# Simply update runs selection (value and string display) # Simply update runs selection (value and string display)
self.select_n_runs.value = new self.select_n_runs.value = new
@ -239,12 +240,9 @@ class CompareRuns:
self.update_plots() self.update_plots()
def update_outliers_filtering(self, attrname, old, new): def update_outliers_filtering(self, attrname, old, new):
self.update_plots() self.update_plots()
# Bokeh setup functions # Bokeh setup functions
def setup_plots(self): def setup_plots(self):
@ -256,7 +254,6 @@ class CompareRuns:
# (defined inside template to avoid bloating server w/ too much JS code) # (defined inside template to avoid bloating server w/ too much JS code)
js_tap_callback = "goToInspectRuns();" js_tap_callback = "goToInspectRuns();"
# Box plot # Box plot
self.plots["boxplot"] = figure( self.plots["boxplot"] = figure(
name="boxplot", title="Variable distribution over runs", name="boxplot", title="Variable distribution over runs",
@ -297,7 +294,6 @@ class CompareRuns:
) )
self.doc.add_root(self.plots["boxplot"]) self.doc.add_root(self.plots["boxplot"])
# Sigma plot (bar plot) # Sigma plot (bar plot)
self.plots["sigma_plot"] = figure( self.plots["sigma_plot"] = figure(
name="sigma_plot", title="Standard deviation σ over runs", name="sigma_plot", title="Standard deviation σ over runs",
@ -324,7 +320,6 @@ class CompareRuns:
) )
self.doc.add_root(self.plots["sigma_plot"]) self.doc.add_root(self.plots["sigma_plot"])
# s plot (bar plot with 2 tabs) # s plot (bar plot with 2 tabs)
self.plots["s10_plot"] = figure( self.plots["s10_plot"] = figure(
name="s10_plot", title="Significant digits s over runs", name="s10_plot", title="Significant digits s over runs",
@ -353,7 +348,6 @@ class CompareRuns:
) )
s10_tab = Panel(child=self.plots["s10_plot"], title="Base 10") s10_tab = Panel(child=self.plots["s10_plot"], title="Base 10")
self.plots["s2_plot"] = figure( self.plots["s2_plot"] = figure(
name="s2_plot", title="Significant digits s over runs", name="s2_plot", title="Significant digits s over runs",
plot_width=900, plot_height=400, x_range=[""], plot_width=900, plot_height=400, x_range=[""],
@ -389,7 +383,6 @@ class CompareRuns:
self.doc.add_root(s_tabs) self.doc.add_root(s_tabs)
def setup_widgets(self): def setup_widgets(self):
# Initial selections # Initial selections
@ -404,13 +397,11 @@ class CompareRuns:
self.backends = self.data.loc[self.tests[0], self.vars[0]]\ self.backends = self.data.loc[self.tests[0], self.vars[0]]\
.index.get_level_values("vfc_backend").drop_duplicates().tolist() .index.get_level_values("vfc_backend").drop_duplicates().tolist()
# Custom JS callback that will be used client side to filter selections # Custom JS callback that will be used client side to filter selections
filter_callback_js = """ filter_callback_js = """
selector.options = options.filter(e => e.includes(cb_obj.value)); selector.options = options.filter(e => e.includes(cb_obj.value));
""" """
# Test selector widget # Test selector widget
# Number of runs to display # Number of runs to display
@@ -442,13 +433,15 @@ class CompareRuns:
         self.widgets["test_filter"] = TextInput(
             name="test_filter", title="Tests filter:"
         )
-        self.widgets["test_filter"].js_on_change("value", CustomJS(
-            args=dict(options=self.tests, selector=self.widgets["select_test"]),
-            code=filter_callback_js
-        ))
+        self.widgets["test_filter"].js_on_change(
+            "value",
+            CustomJS(
+                args=dict(
+                    options=self.tests,
+                    selector=self.widgets["select_test"]),
+                code=filter_callback_js))
         self.doc.add_root(self.widgets["test_filter"])
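
The pattern used here, a TextInput whose CustomJS callback filters the options of a Select entirely client side, can be reproduced in a few lines outside vfc_ci; the widget names below are only an example:

    from bokeh.models import CustomJS, Select, TextInput
    from bokeh.io import curdoc

    options = ["test_dot_product", "test_sum", "test_polynomial"]
    select = Select(options=options, value=options[0])
    filter_box = TextInput(title="Filter:")

    # Runs in the browser: keep only the options containing the typed text
    filter_box.js_on_change("value", CustomJS(
        args=dict(options=options, selector=select),
        code="selector.options = options.filter(e => e.includes(cb_obj.value));"
    ))

    curdoc().add_root(filter_box)
    curdoc().add_root(select)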
# Number of runs to display # Number of runs to display
self.widgets["select_n_runs"] = Select( self.widgets["select_n_runs"] = Select(
@ -458,7 +451,6 @@ class CompareRuns:
self.doc.add_root(self.widgets["select_n_runs"]) self.doc.add_root(self.widgets["select_n_runs"])
self.widgets["select_n_runs"].on_change("value", self.update_n_runs) self.widgets["select_n_runs"].on_change("value", self.update_n_runs)
# Variable selector widget # Variable selector widget
self.widgets["select_var"] = Select( self.widgets["select_var"] = Select(
@ -469,7 +461,6 @@ class CompareRuns:
self.widgets["select_var"].on_change("value", self.update_var) self.widgets["select_var"].on_change("value", self.update_var)
self.widgets["select_var"].on_change("options", self.update_var) self.widgets["select_var"].on_change("options", self.update_var)
# Backend selector widget # Backend selector widget
self.widgets["select_backend"] = Select( self.widgets["select_backend"] = Select(
@ -479,7 +470,6 @@ class CompareRuns:
self.doc.add_root(self.widgets["select_backend"]) self.doc.add_root(self.widgets["select_backend"])
self.widgets["select_backend"].on_change("value", self.update_backend) self.widgets["select_backend"].on_change("value", self.update_backend)
# Outliers filtering checkbox # Outliers filtering checkbox
self.widgets["outliers_filtering_compare"] = CheckboxGroup( self.widgets["outliers_filtering_compare"] = CheckboxGroup(
@ -490,12 +480,11 @@ class CompareRuns:
self.widgets["outliers_filtering_compare"]\ self.widgets["outliers_filtering_compare"]\
.on_change("active", self.update_outliers_filtering) .on_change("active", self.update_outliers_filtering)
# Communication methods # Communication methods
# (to send/receive messages to/from master) # (to send/receive messages to/from master)
# Callback to change view of Inspect runs when data is selected # Callback to change view of Inspect runs when data is selected
def inspect_run_callback(self, new, source_name, x_name): def inspect_run_callback(self, new, source_name, x_name):
# In case we just unselected everything, then do nothing # In case we just unselected everything, then do nothing
@ -507,7 +496,6 @@ class CompareRuns:
self.master.go_to_inspect(run_name) self.master.go_to_inspect(run_name)
# Wrappers for each plot (since new is the index of the clicked element, # Wrappers for each plot (since new is the index of the clicked element,
# it is dependent of the plot because we could have filtered some outliers) # it is dependent of the plot because we could have filtered some outliers)
# There doesn't seem to be an easy way to add custom parameters to a # There doesn't seem to be an easy way to add custom parameters to a
@ -525,7 +513,6 @@ class CompareRuns:
def inspect_run_callback_s10(self, attr, old, new): def inspect_run_callback_s10(self, attr, old, new):
self.inspect_run_callback(new, "s10_source", "s10_x") self.inspect_run_callback(new, "s10_source", "s10_x")
# Constructor # Constructor
def __init__(self, master, doc, data, metadata): def __init__(self, master, doc, data, metadata):
@ -536,7 +523,6 @@ class CompareRuns:
self.data = data self.data = data
self.metadata = metadata self.metadata = metadata
self.sources = { self.sources = {
"boxplot_source": ColumnDataSource(data={}), "boxplot_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}), "sigma_source": ColumnDataSource(data={}),

View File

@@ -10,7 +10,7 @@ import numpy as np
 max_ticks = 15
 max_zscore = 3

-################################################################################
+##########################################################################

 # From a timestamp, return the associated metadata as a Pandas serie
@ -39,7 +39,6 @@ def get_run_name(timestamp, hash):
     now = calendar.timegm(gmt)
     diff = now - timestamp

     # Special case : < 1 minute (return string directly)
     if diff < 60:
         str = "Less than a minute ago"
@@ -83,12 +82,10 @@ def get_run_name(timestamp, hash):
     str = str % (n, plural)

     # We might want to add the git hash
     if hash != "":
         str = str + " (%s)" % hash

     # Finally, check for duplicate with previously generated string
     if str == get_run_name.previous:
         # Increment the duplicate counter and add it to str
@@ -96,12 +93,14 @@ def get_run_name(timestamp, hash):
         str = "%s (%s)" % (str, get_run_name.counter)

     else:
-        # No duplicate, reset both previously generated str and duplicate counter
+        # No duplicate, reset both previously generated str and duplicate
+        # counter
         get_run_name.counter = 0
         get_run_name.previous = str

     return str

 # These external variables will store data about the last generated string to
 # avoid duplicates (assuming the runs are sorted by time)
 get_run_name.counter = 0
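
The duplicate tracking above relies on plain function attributes acting as static variables. A tiny standalone illustration of the same idiom (not from the commit):

    def label(name):
        # Function attributes persist between calls, like a static counter
        if name == label.previous:
            label.counter += 1
            return "%s (%s)" % (name, label.counter)
        label.counter = 0
        label.previous = name
        return name

    label.counter = 0
    label.previous = ""

    print(label("2 minutes ago"), label("2 minutes ago"))
    # 2 minutes ago  2 minutes ago (1)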
@@ -156,11 +155,16 @@ def remove_boxplot_outliers(dict, outliers, prefix):
     dict["%s_x" % prefix] = remove_outliers(dict["%s_x" % prefix], outliers)

-    dict["%s_min" % prefix] = remove_outliers(dict["%s_min" % prefix], outliers)
-    dict["%s_quantile25" % prefix] = remove_outliers(dict["%s_quantile25" % prefix], outliers)
-    dict["%s_quantile50" % prefix] = remove_outliers(dict["%s_quantile50" % prefix], outliers)
-    dict["%s_quantile75" % prefix] = remove_outliers(dict["%s_quantile75" % prefix], outliers)
-    dict["%s_max" % prefix] = remove_outliers(dict["%s_max" % prefix], outliers)
+    dict["%s_min" % prefix] = remove_outliers(
+        dict["%s_min" % prefix], outliers)
+    dict["%s_quantile25" % prefix] = remove_outliers(
+        dict["%s_quantile25" % prefix], outliers)
+    dict["%s_quantile50" % prefix] = remove_outliers(
+        dict["%s_quantile50" % prefix], outliers)
+    dict["%s_quantile75" % prefix] = remove_outliers(
+        dict["%s_quantile75" % prefix], outliers)
+    dict["%s_max" % prefix] = remove_outliers(
+        dict["%s_max" % prefix], outliers)
     dict["%s_mu" % prefix] = remove_outliers(dict["%s_mu" % prefix], outliers)
     dict["nsamples"] = remove_outliers(dict["nsamples"], outliers)
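
detect_outliers and remove_outliers themselves are not shown in this diff. Given the max_zscore = 3 constant above, a plausible z-score based sketch (an assumption, not the actual helper code) would be:

    import numpy as np

    max_zscore = 3

    def detect_outliers(array):
        # Flag points lying more than max_zscore standard deviations from the mean
        array = np.asarray(array, dtype=float)
        sigma = np.std(array)
        if sigma == 0:
            return [False] * len(array)
        return list(np.abs(array - np.average(array)) / sigma > max_zscore)

    def remove_outliers(array, outliers):
        return [x for x, is_out in zip(array, outliers) if not is_out]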

View File

@@ -15,8 +15,7 @@ import helper
 import plot

-################################################################################
+##########################################################################

 class InspectRuns:
@ -40,7 +39,6 @@ class InspectRuns:
return runs_dict return runs_dict
def gen_boxplot_tooltips(self, prefix): def gen_boxplot_tooltips(self, prefix):
return [ return [
("Name", "@%s_x" % prefix), ("Name", "@%s_x" % prefix),
@ -63,26 +61,27 @@ class InspectRuns:
"@%s_mu" % prefix: "printf" "@%s_mu" % prefix: "printf"
} }
# Data processing helper # Data processing helper
# (computes new distributions for sigma, s2, s10) # (computes new distributions for sigma, s2, s10)
def data_processing(self, dataframe): def data_processing(self, dataframe):
# Compute aggragated mu # Compute aggragated mu
dataframe["mu"] = np.vectorize(np.average)(dataframe["mu"], weights=dataframe["nsamples"]) dataframe["mu"] = np.vectorize(
np.average)(
dataframe["mu"],
weights=dataframe["nsamples"])
# nsamples is the number of aggregated elements (as well as the number # nsamples is the number of aggregated elements (as well as the number
# of samples for our new sigma and s distributions) # of samples for our new sigma and s distributions)
dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x)) dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
dataframe["mu_x"] = dataframe.index dataframe["mu_x"] = dataframe.index
# Make sure that strings don't excede a certain length # Make sure that strings don't excede a certain length
dataframe["mu_x"] = dataframe["mu_x"].apply( dataframe["mu_x"] = dataframe["mu_x"].apply(
lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
) )
# Get quantiles and mu for sigma, s10, s2 # Get quantiles and mu for sigma, s10, s2
for prefix in ["sigma", "s10", "s2"]: for prefix in ["sigma", "s10", "s2"]:
@ -91,18 +90,18 @@ class InspectRuns:
dataframe[prefix] = dataframe[prefix].apply(np.sort) dataframe[prefix] = dataframe[prefix].apply(np.sort)
dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min) dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.25,)) dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(
dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.50,)) np.quantile, args=(0.25,))
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.75,)) dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.50,))
dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(
np.quantile, args=(0.75,))
dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max) dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average) dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
del dataframe[prefix] del dataframe[prefix]
return dataframe return dataframe
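
To illustrate the aggregation performed in this data_processing step (purely as an example, with invented numbers): the per-group average is weighted by the number of samples of each run, and the per-group quantiles summarize the distribution of each statistic:

    import numpy as np

    # Two runs grouped together: their averages and sample counts
    mus = [0.1000002, 0.0999996]
    nsamples = [20, 60]
    mu = np.average(mus, weights=nsamples)       # pulled toward the 60-sample run

    sigmas = np.sort([2.1e-7, 3.4e-7, 2.8e-7])
    quartiles = [np.quantile(sigmas, q) for q in (0.25, 0.50, 0.75)]
    print(mu, quartiles)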
# Plots update function # Plots update function
def update_plots(self): def update_plots(self):
@ -117,7 +116,6 @@ class InspectRuns:
] ]
filterby = self.factors_dict[filterby_display] filterby = self.factors_dict[filterby_display]
# Groupby and aggregate lines belonging to the same group in lists # Groupby and aggregate lines belonging to the same group in lists
groups = self.run_data[ groups = self.run_data[
@ -131,17 +129,16 @@ class InspectRuns:
"sigma": lambda x: x.tolist(), "sigma": lambda x: x.tolist(),
"s10": lambda x: x.tolist(), "s10": lambda x: x.tolist(),
"s2": lambda x: x.tolist(), "s2": lambda x: x.tolist(),
"mu": lambda x: x.tolist(), "mu": lambda x: x.tolist(),
# Used for mu weighted average first, then will be replaced # Used for mu weighted average first, then will be replaced
"nsamples": lambda x: x.tolist() "nsamples": lambda x: x.tolist()
}) })
# Compute the new distributions, ... # Compute the new distributions, ...
groups = self.data_processing(groups).to_dict("list") groups = self.data_processing(groups).to_dict("list")
# Update source # Update source
# Assign each ColumnDataSource, starting with the boxplots # Assign each ColumnDataSource, starting with the boxplots
@ -166,7 +163,8 @@ class InspectRuns:
top_outliers = helper.detect_outliers(dict["%s_max" % prefix]) top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
helper.remove_boxplot_outliers(dict, top_outliers, prefix) helper.remove_boxplot_outliers(dict, top_outliers, prefix)
bottom_outliers = helper.detect_outliers(dict["%s_min" % prefix]) bottom_outliers = helper.detect_outliers(
dict["%s_min" % prefix])
helper.remove_boxplot_outliers(dict, bottom_outliers, prefix) helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
self.sources["%s_source" % prefix].data = dict self.sources["%s_source" % prefix].data = dict
@ -185,8 +183,8 @@ class InspectRuns:
if len(self.widgets["outliers_filtering_inspect"].active) > 0: if len(self.widgets["outliers_filtering_inspect"].active) > 0:
mu_outliers = helper.detect_outliers(groups["mu"]) mu_outliers = helper.detect_outliers(groups["mu"])
groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers) groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
groups["mu_x"] = helper.remove_outliers(groups["mu_x"], mu_outliers) groups["mu_x"] = helper.remove_outliers(
groups["mu_x"], mu_outliers)
# Update plots axis/titles # Update plots axis/titles
@ -194,42 +192,38 @@ class InspectRuns:
factors_dict = self.factors_dict.copy() factors_dict = self.factors_dict.copy()
del factors_dict[groupby_display] del factors_dict[groupby_display]
del factors_dict[filterby_display] del factors_dict[filterby_display]
over_all = list(factors_dict.keys())[0] for_all = list(factors_dict.keys())[0]
# Update all display strings for plot title (remove caps, plural) # Update all display strings for plot title (remove caps, plural)
groupby_display = groupby_display.lower() groupby_display = groupby_display.lower()
filterby_display = filterby_display.lower()[:-1] filterby_display = filterby_display.lower()[:-1]
over_all = over_all.lower() for_all = for_all.lower()
self.plots["mu_inspect"].title.text = \ self.plots["mu_inspect"].title.text = \
"Empirical average μ of %s (groupped by %s, for all %s)" \ "Empirical average μ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["sigma_inspect"].title.text = \ self.plots["sigma_inspect"].title.text = \
"Standard deviation σ of %s (groupped by %s, for all %s)" \ "Standard deviation σ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["s10_inspect"].title.text = \ self.plots["s10_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \ "Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["s2_inspect"].title.text = \ self.plots["s2_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \ "Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
# Update x_ranges
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"]) helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"]) helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"]) helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"]) helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
# Widets' callback functions # Widets' callback functions
# Run selector callback # Run selector callback
def update_run(self, attrname, old, new): def update_run(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -260,8 +254,8 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# "Group by" radio # "Group by" radio
def update_groupby(self, attrname, old, new): def update_groupby(self, attrname, old, new):
# Update "Filter by" radio list # Update "Filter by" radio list
@ -269,7 +263,6 @@ class InspectRuns:
del filterby_list[self.widgets["groupby_radio"].active] del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"].labels = filterby_list self.widgets["filterby_radio"].labels = filterby_list
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active self.widgets["filterby_radio"].active
] ]
@ -292,8 +285,8 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# "Filter by" radio # "Filter by" radio
def update_filterby(self, attrname, old, new): def update_filterby(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -318,20 +311,18 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# Filter selector callback # Filter selector callback
def update_filter(self, attrname, old, new): def update_filter(self, attrname, old, new):
self.update_plots() self.update_plots()
# Filter outliers checkbox callback # Filter outliers checkbox callback
def update_outliers_filtering(self, attrname, old, new): def update_outliers_filtering(self, attrname, old, new):
# The status (checked/unchecked) of the checkbox is also verified inside # The status (checked/unchecked) of the checkbox is also verified inside
# self.update_plots(), so calling this function is enough # self.update_plots(), so calling this function is enough
self.update_plots() self.update_plots()
# Bokeh setup functions # Bokeh setup functions
# (for both variable and backend selection at once) # (for both variable and backend selection at once)
@ -339,7 +330,6 @@ class InspectRuns:
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save" tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Tooltips and formatters # Tooltips and formatters
dotplot_tooltips = [ dotplot_tooltips = [
@ -352,14 +342,16 @@ class InspectRuns:
} }
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma") sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("sigma") sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"sigma")
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10") s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s10") s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s10")
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2") s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s2") s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s2")
# Plots # Plots
@ -377,7 +369,6 @@ class InspectRuns:
) )
self.doc.add_root(self.plots["mu_inspect"]) self.doc.add_root(self.plots["mu_inspect"])
# Sigma plot # Sigma plot
self.plots["sigma_inspect"] = figure( self.plots["sigma_inspect"] = figure(
name="sigma_inspect", name="sigma_inspect",
@ -386,13 +377,13 @@ class InspectRuns:
tools=tools, sizing_mode="scale_width" tools=tools, sizing_mode="scale_width"
) )
plot.fill_boxplot( plot.fill_boxplot(
self.plots["sigma_inspect"], self.sources["sigma_source"], prefix="sigma", self.plots["sigma_inspect"],
self.sources["sigma_source"],
prefix="sigma",
tooltips=sigma_boxplot_tooltips, tooltips=sigma_boxplot_tooltips,
tooltips_formatters = sigma_boxplot_tooltips_formatters tooltips_formatters=sigma_boxplot_tooltips_formatters)
)
self.doc.add_root(self.plots["sigma_inspect"]) self.doc.add_root(self.plots["sigma_inspect"])
# s plots # s plots
self.plots["s10_inspect"] = figure( self.plots["s10_inspect"] = figure(
name="s10_inspect", name="s10_inspect",
@ -401,11 +392,14 @@ class InspectRuns:
tools=tools, sizing_mode='scale_width' tools=tools, sizing_mode='scale_width'
) )
plot.fill_boxplot( plot.fill_boxplot(
self.plots["s10_inspect"], self.sources["s10_source"], prefix="s10", self.plots["s10_inspect"],
self.sources["s10_source"],
prefix="s10",
tooltips=s10_boxplot_tooltips, tooltips=s10_boxplot_tooltips,
tooltips_formatters = s10_boxplot_tooltips_formatters tooltips_formatters=s10_boxplot_tooltips_formatters)
) s10_tab_inspect = Panel(
s10_tab_inspect = Panel(child=self.plots["s10_inspect"], title="Base 10") child=self.plots["s10_inspect"],
title="Base 10")
self.plots["s2_inspect"] = figure( self.plots["s2_inspect"] = figure(
name="s2_inspect", name="s2_inspect",
@ -426,8 +420,6 @@ class InspectRuns:
) )
self.doc.add_root(s_tabs_inspect) self.doc.add_root(s_tabs_inspect)
def setup_widgets(self): def setup_widgets(self):
# Generation of selectable items # Generation of selectable items
@ -445,7 +437,6 @@ class InspectRuns:
"Tests": "test" "Tests": "test"
} }
# Run selection # Run selection
# Contains all options strings # Contains all options strings
@ -457,7 +448,6 @@ class InspectRuns:
# This contains only entries matching the run # This contains only entries matching the run
self.run_data = self.data[self.data["timestamp"] == self.current_run] self.run_data = self.data[self.data["timestamp"] == self.current_run]
change_run_callback_js = "updateRunMetadata(cb_obj.value);" change_run_callback_js = "updateRunMetadata(cb_obj.value);"
self.widgets["select_run"] = Select( self.widgets["select_run"] = Select(
@ -475,7 +465,6 @@ class InspectRuns:
)) ))
)) ))
# Factors selection # Factors selection
# "Group by" radio # "Group by" radio
@ -491,7 +480,6 @@ class InspectRuns:
self.update_groupby self.update_groupby
) )
# "Filter by" radio # "Filter by" radio
# Get all possible factors, and remove the one selected in "Group by" # Get all possible factors, and remove the one selected in "Group by"
filterby_list = list(self.factors_dict.keys()) filterby_list = list(self.factors_dict.keys())
@ -509,7 +497,6 @@ class InspectRuns:
self.update_filterby self.update_filterby
) )
# Filter selector # Filter selector
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -530,7 +517,6 @@ class InspectRuns:
self.widgets["select_filter"]\ self.widgets["select_filter"]\
.on_change("value", self.update_filter) .on_change("value", self.update_filter)
# Toggle for outliers filtering # Toggle for outliers filtering
self.widgets["outliers_filtering_inspect"] = CheckboxGroup( self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
@ -541,17 +527,14 @@ class InspectRuns:
self.widgets["outliers_filtering_inspect"]\ self.widgets["outliers_filtering_inspect"]\
.on_change("active", self.update_outliers_filtering) .on_change("active", self.update_outliers_filtering)
# Communication methods # Communication methods
# (to send/receive messages to/from master) # (to send/receive messages to/from master)
# When received, switch to the run_name in parameter # When received, switch to the run_name in parameter
def switch_view(self, run_name): def switch_view(self, run_name):
self.widgets["select_run"].value = run_name self.widgets["select_run"].value = run_name
# Constructor # Constructor
def __init__(self, master, doc, data, metadata): def __init__(self, master, doc, data, metadata):
@ -562,7 +545,6 @@ class InspectRuns:
self.data = data self.data = data
self.metadata = metadata self.metadata = metadata
self.sources = { self.sources = {
"mu_source": ColumnDataSource(data={}), "mu_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}), "sigma_source": ColumnDataSource(data={}),

View File

@@ -1,5 +1,5 @@
 # Look for and read all the run files in the current directory (ending with
-# .vfcrun.hd5), and lanch a Bokeh server for the visualization of this data.
+# .vfcrun.h5), and lanch a Bokeh server for the visualization of this data.

 import os
 import sys
@@ -14,18 +14,16 @@ import compare_runs
 import inspect_runs
 import helper

-################################################################################
+##########################################################################

 # Read vfcrun files, and aggregate them in one dataset
-run_files = [ f for f in os.listdir(".") if f.endswith(".vfcrun.hd5") ]
+run_files = [f for f in os.listdir(".") if f.endswith(".vfcrun.h5")]

 if len(run_files) == 0:
     print(
-        "Warning [vfc_ci]: Could not find any vfcrun files in the directory. " \
-        "This will result in server errors and prevent you from viewing the report."
-    )
+        "Warning [vfc_ci]: Could not find any vfcrun files in the directory. "
+        "This will result in server errors and prevent you from viewing the report.")

 # These are arrays of Pandas dataframes for now
 metadata = []
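
The loop that actually fills these lists falls outside the displayed hunks; it presumably reads both keys of every run file, along the lines of this sketch (run_files is reused from above, the rest is an assumption):

    import pandas as pd

    metadata = []
    data = []
    for f in run_files:
        # Each .vfcrun.h5 file holds a "data" and a "metadata" table
        metadata.append(pd.read_hdf(f, key="metadata"))
        data.append(pd.read_hdf(f, key="data"))

    metadata = pd.concat(metadata).sort_index()
    data = pd.concat(data).sort_index()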
@ -55,12 +53,11 @@ metadata["date"] = metadata.index.to_series().map(
) )
################################################################################ ##########################################################################
curdoc().title = "Verificarlo Report" curdoc().title = "Verificarlo Report"
# Read server arguments # Read server arguments
# (this is quite easy because Bokeh server is called through a wrapper, so # (this is quite easy because Bokeh server is called through a wrapper, so
# we know exactly what the arguments might be) # we know exactly what the arguments might be)
@ -83,7 +80,6 @@ for i in range(1, len(sys.argv)):
address = sys.argv[i + 2] address = sys.argv[i + 2]
url = "" url = ""
# Here, address is either the remote URL or the path to the local Git # Here, address is either the remote URL or the path to the local Git
# repo (depending on the method) # repo (depending on the method)
@ -99,12 +95,11 @@ for i in range(1, len(sys.argv)):
else: else:
raise ValueError( raise ValueError(
"Error [vfc_ci]: The specified method to get the Git " \ "Error [vfc_ci]: The specified method to get the Git "
"repository is invalid. Are you calling Bokeh directly " \ "repository is invalid. Are you calling Bokeh directly "
"instead of using the Verificarlo wrapper ?" "instead of using the Verificarlo wrapper ?"
) )
# At this point, "url" should be set correctly, we can get the repo's # At this point, "url" should be set correctly, we can get the repo's
# URL and name, after making sure we're on a Git URL # URL and name, after making sure we're on a Git URL
@ -113,7 +108,7 @@ for i in range(1, len(sys.argv)):
path = parsed_url.path.split("/") path = parsed_url.path.split("/")
if len(path) < 3: if len(path) < 3:
raise ValueError( raise ValueError(
"Error [vfc_ci]: The found URL doesn't seem to be pointing " \ "Error [vfc_ci]: The found URL doesn't seem to be pointing "
"to a Git repository (path is too short)" "to a Git repository (path is too short)"
) )
@ -122,7 +117,6 @@ for i in range(1, len(sys.argv)):
curdoc().template_variables["repo_url"] = url curdoc().template_variables["repo_url"] = url
curdoc().template_variables["repo_name"] = repo_name curdoc().template_variables["repo_name"] = repo_name
# We should have a "github.com" or a "*gitlab*" URL # We should have a "github.com" or a "*gitlab*" URL
if parsed_url.netloc == "github.com": if parsed_url.netloc == "github.com":
@ -148,8 +142,6 @@ for i in range(1, len(sys.argv)):
git_repo_linked = True git_repo_linked = True
# Look for a logo URL # Look for a logo URL
# If a logo URL is specified, it will be included in the report's header # If a logo URL is specified, it will be included in the report's header
if sys.argv[i] == "logo": if sys.argv[i] == "logo":
@ -162,8 +154,7 @@ curdoc().template_variables["git_repo_linked"] = git_repo_linked
curdoc().template_variables["has_logo"] = has_logo curdoc().template_variables["has_logo"] = has_logo
################################################################################ ##########################################################################
# Setup report views # Setup report views
@ -179,7 +170,6 @@ class ViewsMaster:
def go_to_inspect(self, run_name): def go_to_inspect(self, run_name):
self.inspect.switch_view(run_name) self.inspect.switch_view(run_name)
# Constructor # Constructor
def __init__(self, data, metadata, git_repo_linked, commit_link): def __init__(self, data, metadata, git_repo_linked, commit_link):
@ -190,7 +180,8 @@ class ViewsMaster:
self.commit_link = commit_link self.commit_link = commit_link
# Pass metadata to the template as a JSON string # Pass metadata to the template as a JSON string
curdoc().template_variables["metadata"] = self.metadata.to_json(orient="index") curdoc().template_variables["metadata"] = self.metadata.to_json(
orient="index")
# Runs comparison # Runs comparison
self.compare = compare_runs.CompareRuns( self.compare = compare_runs.CompareRuns(

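A quick note on the to_json(orient="index") call above: it serializes the DataFrame as a mapping keyed by the index, which is convenient for looking records up from the Jinja template. A tiny illustration with made-up data:

import pandas as pd

metadata = pd.DataFrame(
    {"hash": ["abc1234", "def5678"], "author": ["alice", "bob"]},
    index=[0, 1],
)

# Prints: {"0":{"hash":"abc1234","author":"alice"},"1":{"hash":"def5678","author":"bob"}}
print(metadata.to_json(orient="index"))
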
View File

@ -15,21 +15,19 @@ def fill_dotplot(
): ):
# (Optional) Tooltip and tooltip formatters # (Optional) Tooltip and tooltip formatters
if tooltips != None: if tooltips is not None:
hover = HoverTool(tooltips=tooltips, mode="vline", names=["circle"]) hover = HoverTool(tooltips=tooltips, mode="vline", names=["circle"])
if tooltips_formatters != None: if tooltips_formatters is not None:
hover.formatters = tooltips_formatters hover.formatters = tooltips_formatters
plot.add_tools(hover) plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback) # (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None: if js_tap_callback is not None:
tap = TapTool(callback=CustomJS(code=js_tap_callback)) tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap) plot.add_tools(tap)
# (Optional) Add segment to represent a lower bound # (Optional) Add segment to represent a lower bound
if lower_bound: if lower_bound:
lower_segment = plot.segment( lower_segment = plot.segment(
@ -38,24 +36,20 @@ def fill_dotplot(
source=source, line_color="black" source=source, line_color="black"
) )
# Draw dots (actually Bokeh circles) # Draw dots (actually Bokeh circles)
circle = plot.circle( circle = plot.circle(
name="circle", name="circle",
x="%s_x" % data_field, y=data_field, source=source, size=12 x="%s_x" % data_field, y=data_field, source=source, size=12
) )
# (Optional) Draw lines between dots # (Optional) Draw lines between dots
if lines: if lines:
line = plot.line(x="%s_x" % data_field, y=data_field, source=source) line = plot.line(x="%s_x" % data_field, y=data_field, source=source)
# (Optional) Add server tap callback # (Optional) Add server tap callback
if server_tap_callback != None: if server_tap_callback is not None:
circle.data_source.selected.on_change("indices", server_tap_callback) circle.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance # Plot appearance
plot.xgrid.grid_line_color = None plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None plot.ygrid.grid_line_color = None
@ -67,30 +61,27 @@ def fill_dotplot(
plot.xaxis[0].major_label_orientation = pi / 8 plot.xaxis[0].major_label_orientation = pi / 8
def fill_boxplot( def fill_boxplot(
plot, source, plot, source,
prefix="", prefix="",
tooltips=None, tooltips_formatters=None, tooltips=None, tooltips_formatters=None,
js_tap_callback=None, server_tap_callback=None, js_tap_callback=None, server_tap_callback=None
): ):
# (Optional) Tooltip and tooltip formatters # (Optional) Tooltip and tooltip formatters
if tooltips != None: if tooltips is not None:
hover = HoverTool(tooltips=tooltips, mode="vline", names=["full_box"]) hover = HoverTool(tooltips=tooltips, mode="vline", names=["full_box"])
if tooltips_formatters != None: if tooltips_formatters is not None:
hover.formatters = tooltips_formatters hover.formatters = tooltips_formatters
plot.add_tools(hover) plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback) # (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None: if js_tap_callback is not None:
tap = TapTool(callback=CustomJS(code=js_tap_callback)) tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap) plot.add_tools(tap)
# Draw boxes (the prefix argument modifies the fields of ColumnDataSource # Draw boxes (the prefix argument modifies the fields of ColumnDataSource
# that are used) # that are used)
@ -128,18 +119,18 @@ def fill_boxplot(
color="black" color="black"
) )
# (Optional) Add server tap callback # (Optional) Add server tap callback
if server_tap_callback != None: if server_tap_callback is not None:
top_stem.data_source.selected.on_change("indices", server_tap_callback) top_stem.data_source.selected.on_change("indices", server_tap_callback)
bottom_stem.data_source.selected.on_change("indices", server_tap_callback) bottom_stem.data_source.selected.on_change(
"indices", server_tap_callback)
full_box.data_source.selected.on_change("indices", server_tap_callback) full_box.data_source.selected.on_change("indices", server_tap_callback)
bottom_box.data_source.selected.on_change("indices", server_tap_callback) bottom_box.data_source.selected.on_change(
"indices", server_tap_callback)
mu_dot.data_source.selected.on_change("indices", server_tap_callback) mu_dot.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance # Plot appearance
plot.xgrid.grid_line_color = None plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None plot.ygrid.grid_line_color = None

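The server_tap_callback wiring that these hunks reformat follows a standard Bokeh server pattern: register a Python callback on the glyph data source's selection so it fires when a point is tapped. A minimal standalone sketch (the data is made up; run it with bokeh serve):

from bokeh.models import ColumnDataSource
from bokeh.plotting import curdoc, figure

source = ColumnDataSource(data={"x": [0, 1, 2], "y": [3.0, 2.5, 3.2]})
plot = figure(tools="tap")
circle = plot.circle(x="x", y="y", source=source, size=12)

def on_tap(attr, old, new):
    # "new" holds the indices of the selected points in the data source
    print("selected indices:", new)

circle.data_source.selected.on_change("indices", on_tap)
curdoc().add_root(plot)
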
View File

@ -291,19 +291,19 @@
<!-- PLOTS --> <!-- PLOTS -->
<div class="column is-9"> <div class="column is-9">
<h3 class="title is-3">Plots</h3> <h3 class="title is-3">Plots</h3>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 3;">
{{ embed(roots.s_tabs_inspect) }} {{ embed(roots.s_tabs_inspect) }}
</div> </div>
<br> <br>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 2;">
{{ embed(roots.sigma_inspect) }} {{ embed(roots.sigma_inspect) }}
</div> </div>
<br> <br>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 1;">
{{ embed(roots.mu_inspect) }} {{ embed(roots.mu_inspect) }}
</div> </div>
</div> </div>
@ -325,6 +325,8 @@
</div> </div>
<!-- JAVASCRIPT -->
<script> <script>
// Listen to clicks on breadcrumb (for responsive header) // Listen to clicks on breadcrumb (for responsive header)

View File

@ -29,7 +29,7 @@ run_verificarlo_tests:
- git fetch --all - git fetch --all
- git checkout -b {{ci_branch}} origin/{{ci_branch}} - git checkout -b {{ci_branch}} origin/{{ci_branch}}
- mkdir -p vfcruns - mkdir -p vfcruns
- mv *.vfcrun.hd5 vfcruns - mv *.vfcrun.h5 vfcruns
- git add vfcruns/* - git add vfcruns/*
- git commit -m "[auto] New test results for commit ${git_hash}" - git commit -m "[auto] New test results for commit ${git_hash}"
- git push - git push
@ -39,4 +39,4 @@ run_verificarlo_tests:
artifacts: artifacts:
paths: paths:
- "*.vfcraw.hd5" - "*.vfcraw.h5"

View File

@ -26,7 +26,7 @@ jobs:
- name: Install Python requirements - name: Install Python requirements
run: | run: |
pip install numpy scipy pandas bokeh jinja2 tables GitPython pip install numpy scipy pandas bokeh jinja2 tables GitPython
apt update
apt install wget apt install wget
wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages
@ -45,7 +45,7 @@ jobs:
git checkout {{ci_branch}} git checkout {{ci_branch}}
mkdir -p vfcruns mkdir -p vfcruns
mv *.vfcrun.hd5 vfcruns mv *.vfcrun.h5 vfcruns
git add vfcruns/* git add vfcruns/*
git commit -m "[auto] New test results for commit ${git_hash}" git commit -m "[auto] New test results for commit ${git_hash}"
git push git push
@ -54,4 +54,4 @@ jobs:
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
with: with:
{% raw %}name: ${{github.sha}}.vfcraw{% endraw %} {% raw %}name: ${{github.sha}}.vfcraw{% endraw %}
path: ./*.vfcraw.hd5 path: ./*.vfcraw.h5

vfc_ci
View File

@ -10,10 +10,11 @@
import argparse import argparse
################################################################################ ##########################################################################
# Parameters validation helpers # Parameters validation helpers
def is_port(string): def is_port(string):
value = int(string) value = int(string)
if value < 0 or value > 65535: if value < 0 or value > 65535:
@ -31,8 +32,7 @@ def is_directory(string):
return string return string
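
These small validators are presumably meant to be plugged into argparse as type= callables, so an invalid value is rejected at parse time. A minimal sketch of how is_port could be wired (the function body past the range check, the flag name, and the error message are guesses, not taken from vfc_ci):

import argparse

def is_port(string):
    value = int(string)
    if value < 0 or value > 65535:
        raise argparse.ArgumentTypeError("%s is not a valid port number" % string)
    return string

parser = argparse.ArgumentParser()
parser.add_argument("--port", type=is_port, default="8080")
print(parser.parse_args(["--port", "8080"]).port)   # "8080"
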
################################################################################ ##########################################################################
# Subcommand decorator # Subcommand decorator
@ -41,6 +41,7 @@ cli = argparse.ArgumentParser(
) )
subparsers = cli.add_subparsers(dest="subcommand") subparsers = cli.add_subparsers(dest="subcommand")
def subcommand(description="", args=[], parent=subparsers): def subcommand(description="", args=[], parent=subparsers):
def decorator(func): def decorator(func):
parser = parent.add_parser(func.__name__, description=description) parser = parent.add_parser(func.__name__, description=description)
@ -54,9 +55,7 @@ def argument(*name_or_flags, **kwargs):
return ([*name_or_flags], kwargs) return ([*name_or_flags], kwargs)
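
For readers unfamiliar with this decorator pattern: subcommand() turns each decorated function into an argparse subparser named after the function, and argument() simply packages positional and keyword arguments for add_argument. A self-contained sketch of the same idea (the "hello" command is invented for illustration):

import argparse

cli = argparse.ArgumentParser(description="Decorator-based subcommand demo")
subparsers = cli.add_subparsers(dest="subcommand")

def argument(*name_or_flags, **kwargs):
    return ([*name_or_flags], kwargs)

def subcommand(description="", args=[], parent=subparsers):
    def decorator(func):
        parser = parent.add_parser(func.__name__, description=description)
        for arg in args:
            parser.add_argument(*arg[0], **arg[1])
        parser.set_defaults(func=func)
    return decorator

@subcommand(description="Say hello.", args=[argument("--name", default="world")])
def hello(args):
    print("Hello, %s!" % args.name)

if __name__ == "__main__":
    parsed = cli.parse_args()
    if parsed.subcommand is None:
        cli.print_help()
    else:
        parsed.func(parsed)
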
##########################################################################
################################################################################
# "setup" subcommand # "setup" subcommand
@ -76,10 +75,9 @@ def setup(args):
import ci.setup import ci.setup
ci.setup.setup(args.git_host) ci.setup.setup(args.git_host)
# "test" subcommand # "test" subcommand
@subcommand( @subcommand(
description="Execute predefined Verificarlo tests and save their results.", description="Execute predefined Verificarlo tests and save their results.",
args=[ args=[
@ -112,10 +110,9 @@ def test(args):
import ci.test import ci.test
ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run) ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run)
# "serve" subcommand # "serve" subcommand
@subcommand( @subcommand(
description=""" description="""
Start a server to visualize Verificarlo test results. Start a server to visualize Verificarlo test results.
@ -174,9 +171,9 @@ def test(args):
def serve(args): def serve(args):
# git_directory and git_url are supposed to be exclusive # git_directory and git_url are supposed to be exclusive
if args.git_directory != None and args.git_url != None: if args.git_directory is not None and args.git_url is not None:
raise argparse.ArgumentTypeError( raise argparse.ArgumentTypeError(
"\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "\ "\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "
"mutually exclusive. Please make sure to use at most one of them." "mutually exclusive. Please make sure to use at most one of them."
) )
@ -191,12 +188,9 @@ def serve(args):
) )
############################################################################### ###############################################################################
# Main command group and entry point # Main command group and entry point
if __name__ == "__main__": if __name__ == "__main__":
args = cli.parse_args() args = cli.parse_args()
if args.subcommand is None: if args.subcommand is None: