Update vfc_ci code

Aurélien Delval 2021-05-03 15:47:44 +02:00
parent d81777e347
commit 44f0fc1f51
20 changed files with 398 additions and 477 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -3,26 +3,26 @@
 import os
 
 def serve(show, git_directory, git_url, port, allow_origin, logo_url):
     # Prepare arguments
     show = "--show" if show else ""
 
     git = ""
-    if git_directory != None:
+    if git_directory is not None:
         git = "git directory %s" % git_directory
-    if git_url != None:
+    if git_url is not None:
         git = "git url %s" % git_url
 
     logo = ""
-    if logo_url != None:
+    if logo_url is not None:
         logo = "logo %s" % logo_url
 
     dirname = os.path.dirname(__file__)
 
     # Call the "bokeh serve" command on the system
     command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
         % (dirname, show, allow_origin, port, port, git, logo)
     os.system(command)
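
(Aside for reviewers: a minimal standalone sketch of the command string serve() assembles, with made-up argument values; note that the port is interpolated twice, once for --allow-websocket-origin and once for --port.)

# Illustration only: every value below is hypothetical.
dirname = "/path/to/vfc_ci"        # stands in for os.path.dirname(__file__)
show = "--show"
allow_origin = "localhost"
port = 8080
git = "git url https://example.com/repo.git"
logo = ""

command = "bokeh serve %s/vfc_ci_report %s --allow-websocket-origin=%s:%s --port %s --args %s %s" \
    % (dirname, show, allow_origin, port, port, git, logo)
print(command)
# bokeh serve /path/to/vfc_ci/vfc_ci_report --show --allow-websocket-origin=localhost:8080 --port 8080 --args git url https://example.com/repo.git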

View File

@@ -6,10 +6,10 @@ import sys
 import os
 
 from jinja2 import Environment, FileSystemLoader
 
-################################################################################
+##########################################################################
 # Helper functions
 
 def gen_readme(dev_branch, ci_branch):
@@ -26,17 +26,16 @@ def gen_readme(dev_branch, ci_branch):
         fh.write(render)
 
 def gen_workflow(git_host, dev_branch, ci_branch, repo):
     # Init template loader
     path = os.path.dirname(os.path.abspath(__file__))
     env = Environment(loader=FileSystemLoader(path))
 
     if git_host == "github":
         # Load template
-        template = env.get_template("workflow_templates/vfc_test_workflow.j2.yml")
+        template = env.get_template(
+            "workflow_templates/vfc_test_workflow.j2.yml")
 
         # Render it
         render = template.render(dev_branch=dev_branch, ci_branch=ci_branch)
@@ -47,13 +46,14 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
         with open(filename, "w") as fh:
             fh.write(render)
 
     if git_host == "gitlab":
         template = env.get_template("workflow_templates/gitlab-ci.j2.yml")
 
         # Ask for the user who will run the jobs (Gitlab specific)
-        username = input("[vfc_ci] Enter the name of the user who will run the CI jobs:")
-        email = input("[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
+        username = input(
+            "[vfc_ci] Enter the name of the user who will run the CI jobs:")
+        email = input(
+            "[vfc_ci] Enter the e-mail of the user who will run the CI jobs:")
 
         remote_url = repo.remotes[0].config_reader.get("url")
         remote_url = remote_url.replace("http://", "")
@@ -64,7 +64,7 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
             ci_branch=ci_branch,
             username=username,
             email=email,
-            remote_url = remote_url
+            remote_url=remote_url
         )
 
         filename = ".gitlab-ci.yml"
@@ -72,38 +72,35 @@ def gen_workflow(git_host, dev_branch, ci_branch, repo):
             fh.write(render)
 
-################################################################################
+##########################################################################
 
 def setup(git_host):
     # Init repo and make sure that the workflow setup is possible
     repo = git.Repo(".")
     repo.remotes.origin.fetch()
 
     # Make sure that repository is clean
     assert(not repo.is_dirty()), "Error [vfc_ci]: Unstaged changes detected " \
         "in your work tree."
 
     dev_branch = repo.active_branch
     dev_branch_name = str(dev_branch)
     dev_remote = dev_branch.tracking_branch()
 
-    # Make sure that the active branch (on which to setup the workflow) has a remote
-    assert(dev_remote != None), "Error [vfc_ci]: The current branch doesn't " \
+    # Make sure that the active branch (on which to setup the workflow) has a
+    # remote
+    assert(dev_remote is not None), "Error [vfc_ci]: The current branch doesn't " \
         "have a remote."
 
     # Make sure that we are not behind the remote (so we can push safely later)
     rev = "%s...%s" % (dev_branch_name, str(dev_remote))
     commits_behind = list(repo.iter_commits(rev))
     assert(commits_behind == []), "Error [vfc_ci]: The local branch seems " \
         "to be at least one commit behind remote."
 
     # Commit the workflow on the current (dev) branch
     ci_branch_name = "vfc_ci_%s" % dev_branch_name
     gen_workflow(git_host, dev_branch_name, ci_branch_name, repo)
@@ -111,12 +108,10 @@ def setup(git_host):
     repo.index.commit("[auto] Set up Verificarlo CI on this branch")
     repo.remote(name="origin").push()
 
     # Create the CI branch (orphan branch with a readme on it)
     # (see : https://github.com/gitpython-developers/GitPython/issues/615)
-    repo.head.reference = git.Head(repo, "refs/heads/"+ ci_branch_name)
+    repo.head.reference = git.Head(repo, "refs/heads/" + ci_branch_name)
 
     repo.index.remove(["*"])
     gen_readme(dev_branch_name, ci_branch_name)
@@ -133,23 +128,19 @@ def setup(git_host):
     # Force checkout back to the original (dev) branch
     repo.git.checkout(dev_branch_name, force=True)
 
     # Print termination messages
     print(
-        "Info [vfc_ci]: A Verificarlo CI workflow has been setup on " \
+        "Info [vfc_ci]: A Verificarlo CI workflow has been setup on "
        "%s." % dev_branch_name
     )
     print(
-        "Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on " \
-        "this branch. You can also perform a \"vfc_ci test\" dry run before "\
-        "pushing other commits."
-    )
+        "Info [vfc_ci]: Make sure that you have a \"vfc_tests_config.json\" on "
+        "this branch. You can also perform a \"vfc_ci test\" dry run before "
+        "pushing other commits.")
 
     if git_host == "gitlab":
         print(
-            "Info [vfc_ci]: Since you are using GitLab, make sure that you " \
-            "have created an access token for the user you specified (registered "\
-            "as a variable called \"CI_PUSH_TOKEN\" in your repository)."
-        )
+            "Info [vfc_ci]: Since you are using GitLab, make sure that you "
+            "have created an access token for the user you specified (registered "
+            "as a variable called \"CI_PUSH_TOKEN\" in your repository).")
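
(Aside: for readers unfamiliar with the templating step used by gen_workflow above, here is a minimal standalone sketch of the Jinja2 pattern; the branch names and the output path are placeholders, and the template file is assumed to sit next to the script.)

import os
from jinja2 import Environment, FileSystemLoader

path = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(path))
template = env.get_template("workflow_templates/vfc_test_workflow.j2.yml")

# Placeholder branch names, for illustration only
render = template.render(dev_branch="main", ci_branch="vfc_ci_main")

with open("rendered_workflow.yml", "w") as fh:   # placeholder output path
    fh.write(render)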

View File

@@ -1,6 +1,10 @@
 # This script reads the vfc_tests_config.json file and executes tests accordingly
-# It will also generate a ... .vfcrun.hd5 file with the results of the run
+# It will also generate a ... .vfcrun.h5 file with the results of the run
 
+import sigdigits as sd
+import scipy.stats
+import numpy as np
+import pandas as pd
 import os
 import json
@@ -12,21 +16,15 @@ import time
 import pickle
 pickle.HIGHEST_PROTOCOL = 4
 
-import pandas as pd
-import numpy as np
-import scipy.stats
-import sigdigits as sd
-
 # Magic numbers
 min_pvalue = 0.05
 max_zscore = 3
 
-################################################################################
+##########################################################################
 # Helper functions
 
 # Read a CSV file outputted by vfc_probe as a Pandas dataframe
 def read_probes_csv(filepath, backend, warnings, execution_data):
@@ -36,35 +34,34 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
     except FileNotFoundError:
         print(
-            "Warning [vfc_ci]: Probes not found, your code might have crashed " \
+            "Warning [vfc_ci]: Probes not found, your code might have crashed "
             "or you might have forgotten to call vfc_dump_probes"
         )
         warnings.append(execution_data)
         return pd.DataFrame(
-            columns = ["test", "variable", "values", "vfc_backend"]
+            columns=["test", "variable", "values", "vfc_backend"]
         )
 
     except Exception:
         print(
-            "Warning [vfc_ci]: Your probes could not be read for some unknown " \
+            "Warning [vfc_ci]: Your probes could not be read for some unknown "
             "reason"
         )
         warnings.append(execution_data)
         return pd.DataFrame(
-            columns = ["test", "variable", "values", "vfc_backend"]
+            columns=["test", "variable", "values", "vfc_backend"]
         )
 
     if len(results) == 0:
         print(
-            "Warning [vfc_ci]: Probes empty, it looks like you have dumped " \
+            "Warning [vfc_ci]: Probes empty, it looks like you have dumped "
            "them without calling vfc_put_probe"
         )
         warnings.append(execution_data)
 
     # Once the CSV has been opened and validated, return its content
     results["value"] = results["value"].apply(lambda x: float.fromhex(x))
-    results.rename(columns = {"value":"values"}, inplace = True)
+    results.rename(columns={"value": "values"}, inplace=True)
 
     results["vfc_backend"] = backend
@@ -75,67 +72,61 @@ def read_probes_csv(filepath, backend, warnings, execution_data):
 def significant_digits(x):
-    # In a pandas DF, "values" actually refers to the array of columns, and
-    # not the column named "values"
-    distribution = x.values[3]
-    distribution = distribution.reshape(len(distribution), 1)
-
-    # The distribution's empirical average will be used as the reference
-    mu = np.array([x.mu])
-
-    # If the null hypothesis is rejected, call sigdigits with General mode:
+    # If the null hypothesis is rejected, call sigdigits with the General
+    # formula:
     if x.pvalue < min_pvalue:
-        method = sd.Method.General
+        # In a pandas DF, "values" actually refers to the array of columns, and
+        # not the column named "values"
+        distribution = x.values[3]
+        distribution = distribution.reshape(len(distribution), 1)
+
+        # The distribution's empirical average will be used as the reference
+        mu = np.array([x.mu])
+
         s = sd.significant_digits(
             distribution,
             mu,
-            precision=sd.Precision.Absolute,
-            method=method
+            precision=sd.Precision.Relative,
+            method=sd.Method.General,
+            probability=0.9,
+            confidence=0.95
         )
 
-    # Else, manually compute sMCA which is equivalent to a 66% confidence interval
+        # s is returned inside a list
+        return s[0]
+
+    # Else, manually compute sMCA (Stott-Parker formula)
     else:
-        method = sd.Method.CNH
-        s = sd.significant_digits(
-            distribution,
-            mu,
-            precision=sd.Precision.Absolute,
-            method=method,
-            probability=0.66,
-            confidence=0.66,
-        )
-
-    # s is returned as a size 1 list
-    return s[0]
+        return -np.log2(np.absolute(x.sigma / x.mu))
 
 
 def significant_digits_lower_bound(x):
     # If the null hypothesis is rejected, no lower bound
     if x.pvalue < min_pvalue:
         return x.s2
 
-    # Else, the lower bound will be a 95% confidence interval
-
-    distribution = x.values[3]
-    distribution = distribution.reshape(len(distribution), 1)
-    mu = np.array([x.mu])
-
-    s = sd.significant_digits(
-        distribution,
-        mu,
-        precision=sd.Precision.Absolute,
-        method=sd.Method.CNH,
-    )
-    return s[0]
+    # Else, the lower bound will be computed with p= .9 alpha-1=.95
+    else:
+        distribution = x.values[3]
+        distribution = distribution.reshape(len(distribution), 1)
+        mu = np.array([x.mu])
+
+        s = sd.significant_digits(
+            distribution,
+            mu,
+            precision=sd.Precision.Relative,
+            method=sd.Method.CNH,
+            probability=0.9,
+            confidence=0.95
+        )
+
+        return s[0]
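
(Aside: the fallback branch above drops the old CNH call in favour of the Stott-Parker estimate. Below is a self-contained sketch of that computation on toy values, with a manual base change standing in for what sd.change_base is assumed to do.)

import numpy as np

values = np.array([0.10000012, 0.10000009, 0.10000011, 0.10000010])  # toy sample

mu = np.average(values)
sigma = np.std(values)

# Stott-Parker estimate of the number of significant bits (sMCA)
s2 = -np.log2(np.absolute(sigma / mu))

# Conversion to decimal digits: s10 = s2 * log10(2)
s10 = s2 * np.log10(2)

print(s2, s10)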
-################################################################################
+##########################################################################
 # Main functions
@@ -148,13 +139,12 @@ def read_config():
     except FileNotFoundError as e:
         e.strerror = "Error [vfc_ci]: This file is required to describe the tests "\
             "to run and generate a Verificarlo run file"
         raise e
 
     return json.loads(data)
 
 # Set up metadata
 def generate_metadata(is_git_commit):
@@ -167,7 +157,6 @@ def generate_metadata(is_git_commit):
         "message": ""
     }
 
     if is_git_commit:
         print("Fetching metadata from last commit...")
         from git import Repo
@@ -179,13 +168,12 @@ def generate_metadata(is_git_commit):
         metadata["hash"] = str(head_commit)[0:7]
         metadata["author"] = "%s <%s>" \
             % (str(head_commit.author), head_commit.author.email)
         metadata["message"] = head_commit.message.split("\n")[0]
 
     return metadata
 
 # Execute tests and collect results in a Pandas dataframe (+ dataprocessing)
 def run_tests(config):
@@ -204,10 +192,10 @@ def run_tests(config):
     # not get any data
     warnings = []
 
     # Tests execution loop
     for executable in config["executables"]:
-        print("Info [vfc_ci]: Running executable :", executable["executable"], "...")
+        print("Info [vfc_ci]: Running executable :",
+              executable["executable"], "...")
 
         parameters = ""
         if "parameters" in executable:
@@ -245,26 +233,23 @@ def run_tests(config):
             n_files = n_files + 1
 
     # Clean CSV output files (by deleting the tmp folder)
     os.system("rm -rf .vfcruns.tmp")
 
     # Combine all separate executions in one dataframe
     data = pd.concat(data, sort=False, ignore_index=True)
     data = data.groupby(["test", "vfc_backend", "variable"])\
         .values.apply(list).reset_index()
 
     # Make sure we have some data to work on
     assert(len(data) != 0), "Error [vfc_ci]: No data have been generated " \
         "by your tests executions, aborting run without writing results file"
 
     return data, warnings
 
 # Data processing
 def data_processing(data):
     data["values"] = data["values"].apply(lambda x: np.array(x).astype(float))
@@ -272,8 +257,8 @@ def data_processing(data):
     # Get empirical average, standard deviation and p-value
     data["mu"] = data["values"].apply(np.average)
     data["sigma"] = data["values"].apply(np.std)
-    data["pvalue"] = data["values"].apply(lambda x: scipy.stats.shapiro(x).pvalue)
+    data["pvalue"] = data["values"].apply(
+        lambda x: scipy.stats.shapiro(x).pvalue)
 
     # Significant digits
     data["s2"] = data.apply(significant_digits, axis=1)
@@ -281,8 +266,8 @@ def data_processing(data):
     # Lower bound of the confidence interval using the sigdigits module
     data["s2_lower_bound"] = data.apply(significant_digits_lower_bound, axis=1)
-    data["s10_lower_bound"] = data["s2_lower_bound"].apply(lambda x: sd.change_base(x, 10))
+    data["s10_lower_bound"] = data["s2_lower_bound"].apply(
+        lambda x: sd.change_base(x, 10))
 
     # Compute moments of the distribution
     # (including a new distribution obtained by filtering outliers)
@@ -297,13 +282,13 @@ def data_processing(data):
     data["nsamples"] = data["values"].apply(len)
 
 # Display all executions that resulted in a warning
 def show_warnings(warnings):
     if len(warnings) > 0:
         print(
-            "Warning [vfc_ci]: Some of your runs could not generate any data " \
+            "Warning [vfc_ci]: Some of your runs could not generate any data "
             "(for instance because your code crashed) and resulted in "
             "warnings. Here is the complete list :"
         )
@@ -316,9 +301,7 @@ def show_warnings(warnings):
             print(" Repetition: %s" % warnings[i]["repetition"])
 
-################################################################################
+##########################################################################
 # Entry point
@@ -334,54 +317,51 @@ def run(is_git_commit, export_raw_values, dry_run):
     data, warnings = run_tests(config)
     show_warnings(warnings)
 
     # Data processing
     print("Info [vfc_ci]: Processing data...")
     data_processing(data)
 
-    # Prepare data for export (by creating a proper index and linking run timestamp)
+    # Prepare data for export (by creating a proper index and linking run
+    # timestamp)
     data = data.set_index(["test", "variable", "vfc_backend"]).sort_index()
     data["timestamp"] = metadata["timestamp"]
 
-    filename = metadata["hash"] if is_git_commit else str(metadata["timestamp"])
+    filename = metadata["hash"] if is_git_commit else str(
+        metadata["timestamp"])
 
     # Prepare metadata for export
     metadata = pd.DataFrame.from_dict([metadata])
     metadata = metadata.set_index("timestamp")
 
     # NOTE : Exporting to HDF5 requires to install "tables" on the system
 
     # Export raw data if needed
     if export_raw_values and not dry_run:
-        data.to_hdf(filename + ".vfcraw.hd5", key="data")
-        metadata.to_hdf(filename + ".vfcraw.hd5", key="metadata")
+        data.to_hdf(filename + ".vfcraw.h5", key="data")
+        metadata.to_hdf(filename + ".vfcraw.h5", key="metadata")
 
     # Export data
     del data["values"]
     if not dry_run:
-        data.to_hdf(filename + ".vfcrun.hd5", key="data")
-        metadata.to_hdf(filename + ".vfcrun.hd5", key="metadata")
+        data.to_hdf(filename + ".vfcrun.h5", key="data")
+        metadata.to_hdf(filename + ".vfcrun.h5", key="metadata")
 
     # Print termination messages
     print(
-        "Info [vfc_ci]: The results have been successfully written to " \
-        "%s.vfcrun.hd5." \
+        "Info [vfc_ci]: The results have been successfully written to "
+        "%s.vfcrun.h5."
         % filename
     )
 
     if export_raw_values:
         print(
-            "Info [vfc_ci]: A file containing the raw values has also been " \
-            "created : %s.vfcraw.hd5."
+            "Info [vfc_ci]: A file containing the raw values has also been "
+            "created : %s.vfcraw.h5."
             % filename
         )
 
     if dry_run:
         print(
-            "Info [vfc_ci]: The dry run flag was enabled, so no files were " \
+            "Info [vfc_ci]: The dry run flag was enabled, so no files were "
             "actually created."
         )
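
(Aside: a minimal sketch of how a produced run file can be read back with pandas; the filename is a placeholder, and reading HDF5 requires the "tables" package, as the NOTE above says.)

import pandas as pd

data = pd.read_hdf("abc1234.vfcrun.h5", key="data")          # placeholder filename
metadata = pd.read_hdf("abc1234.vfcrun.h5", key="metadata")
print(data.head())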

View File

@@ -9,19 +9,18 @@ from math import pi
 from bokeh.plotting import figure, curdoc
 from bokeh.embed import components
 from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool, \
     TextInput, CheckboxGroup, TapTool, CustomJS
 
 import helper
 import plot
 
-################################################################################
+##########################################################################
 
 class CompareRuns:
     # Helper functions related to CompareRuns
 
     # From an array of timestamps, returns the array of runs names (for the x
     # axis ticks), as well as the metadata (in a dict of arrays) associated to
@@ -29,13 +28,13 @@ class CompareRuns:
     def gen_x_series(self, timestamps):
         # Initialize the objects to return
-        x_series= []
+        x_series = []
         x_metadata = dict(
-            date = [],
-            is_git_commit = [],
-            hash = [],
-            author = [],
-            message = []
+            date=[],
+            is_git_commit=[],
+            hash=[],
+            author=[],
+            message=[]
         )
 
         # n == 0 means we want all runs, we also make sure not to go out of
@@ -44,44 +43,41 @@ class CompareRuns:
         if n == 0 or n > len(timestamps):
             n = len(timestamps)
 
         for i in range(0, n):
             # Get metadata associated to this run
-            row_metadata = helper.get_metadata(self.metadata, timestamps[-i-1])
-            date = time.ctime(timestamps[-i-1])
+            row_metadata = helper.get_metadata(
+                self.metadata, timestamps[-i - 1])
+            date = time.ctime(timestamps[-i - 1])
 
             # Fill the x series
             str = row_metadata["name"]
-            x_series.insert(0, helper.get_metadata(self.metadata, timestamps[-i-1])["name"])
+            x_series.insert(0, helper.get_metadata(
+                self.metadata, timestamps[-i - 1])["name"])
 
             # Fill the metadata lists
             x_metadata["date"].insert(0, date)
-            x_metadata["is_git_commit"].insert(0, row_metadata["is_git_commit"])
+            x_metadata["is_git_commit"].insert(
+                0, row_metadata["is_git_commit"])
             x_metadata["hash"].insert(0, row_metadata["hash"])
             x_metadata["author"].insert(0, row_metadata["author"])
             x_metadata["message"].insert(0, row_metadata["message"])
 
         return x_series, x_metadata
 
     # Plots update function
     def update_plots(self):
 
         # Select all data matching current test/var/backend
-        runs = self.data.loc[
-            [self.widgets["select_test"].value],
-            self.widgets["select_var"].value, self.widgets["select_backend"].value
-        ]
+        runs = self.data.loc[[self.widgets["select_test"].value],
+                             self.widgets["select_var"].value,
+                             self.widgets["select_backend"].value]
 
         timestamps = runs["timestamp"]
         x_series, x_metadata = self.gen_x_series(timestamps.sort_values())
 
         # Update source
         main_dict = runs.to_dict("series")
         main_dict["x"] = x_series
@@ -91,8 +87,7 @@ class CompareRuns:
         # Select the last n runs only
         n = self.current_n_runs
-        main_dict = {key:value[-n:] for key, value in main_dict.items()}
+        main_dict = {key: value[-n:] for key, value in main_dict.items()}
 
         # Generate ColumnDataSources for the 3 dotplots
         for stat in ["sigma", "s10", "s2"]:
@@ -111,18 +106,20 @@ class CompareRuns:
             }
 
             if stat == "s10" or stat == "s2":
-                dict["%s_lower_bound" % stat] = main_dict["%s_lower_bound" % stat]
+                dict["%s_lower_bound" %
+                     stat] = main_dict["%s_lower_bound" %
+                                       stat]
 
             # Filter outliers if the box is checked
             if len(self.widgets["outliers_filtering_compare"].active) > 0:
                 outliers = helper.detect_outliers(dict[stat])
                 dict[stat] = helper.remove_outliers(dict[stat], outliers)
-                dict["%s_x" % stat] = helper.remove_outliers(dict["%s_x" % stat], outliers)
+                dict["%s_x" % stat] = helper.remove_outliers(
+                    dict["%s_x" % stat], outliers)
 
             # Assign ColumnDataSource
             self.sources["%s_source" % stat].data = dict
 
         # Generate ColumnDataSource for the boxplot
         dict = {
             "is_git_commit": main_dict["is_git_commit"],
@@ -132,40 +129,48 @@ class CompareRuns:
             "message": main_dict["message"],
 
             "x": main_dict["x"],
-            "min" : main_dict["min"],
-            "quantile25" : main_dict["quantile25"],
-            "quantile50" : main_dict["quantile50"],
-            "quantile75" : main_dict["quantile75"],
-            "max" : main_dict["max"],
-            "mu" : main_dict["mu"],
-            "pvalue" : main_dict["pvalue"],
+            "min": main_dict["min"],
+            "quantile25": main_dict["quantile25"],
+            "quantile50": main_dict["quantile50"],
+            "quantile75": main_dict["quantile75"],
+            "max": main_dict["max"],
+            "mu": main_dict["mu"],
+            "pvalue": main_dict["pvalue"],
             "nsamples": main_dict["nsamples"]
         }
         self.sources["boxplot_source"].data = dict
 
-        # Update x_ranges
-        helper.reset_x_range(self.plots["boxplot"], self.sources["boxplot_source"].data["x"])
-        helper.reset_x_range(self.plots["sigma_plot"], self.sources["sigma_source"].data["sigma_x"])
-        helper.reset_x_range(self.plots["s10_plot"], self.sources["s10_source"].data["s10_x"])
-        helper.reset_x_range(self.plots["s2_plot"], self.sources["s2_source"].data["s2_x"])
+        # Update x axis
+        helper.reset_x_range(
+            self.plots["boxplot"],
+            self.sources["boxplot_source"].data["x"]
+        )
+        helper.reset_x_range(
+            self.plots["sigma_plot"],
+            self.sources["sigma_source"].data["sigma_x"]
+        )
+        helper.reset_x_range(
+            self.plots["s10_plot"],
+            self.sources["s10_source"].data["s10_x"]
+        )
+        helper.reset_x_range(
+            self.plots["s2_plot"],
+            self.sources["s2_source"].data["s2_x"]
+        )
 
     # Widgets' callback functions
 
     def update_test(self, attrname, old, new):
         # If the value is updated by the CustomJS, self.widgets["select_var"].value
-        # won't be updated, so we have to look for that case and assign it manually
+        # won't be updated, so we have to look for that case and assign it
+        # manually
 
         # "new" should be a list when updated by CustomJS
-        if type(new) == list:
+        if isinstance(new, list):
             # If filtering removed all options, we might have an empty list
             # (in this case, we just skip the callback and do nothing)
             if len(new) > 0:
@@ -180,10 +185,9 @@ class CompareRuns:
         # New list of available vars
         self.vars = self.data.loc[new]\
             .index.get_level_values("variable").drop_duplicates().tolist()
         self.widgets["select_var"].options = self.vars
 
         # Reset var selection if old one is not available in new vars
         if self.widgets["select_var"].value not in self.vars:
             self.widgets["select_var"].value = self.vars[0]
 
@@ -194,14 +198,14 @@ class CompareRuns:
         # anyway)
         self.update_var("", "", self.widgets["select_var"].value)
 
     def update_var(self, attrname, old, new):
         # If the value is updated by the CustomJS, self.widgets["select_var"].value
-        # won't be updated, so we have to look for that case and assign it manually
+        # won't be updated, so we have to look for that case and assign it
+        # manually
 
         # new should be a list when updated by CustomJS
-        if type(new) == list:
+        if isinstance(new, list):
             new = new[0]
 
         if new != self.widgets["select_var"].value:
 
@@ -209,10 +213,9 @@ class CompareRuns:
             self.widgets["select_var"].value = new
             return
 
         # New list of available backends
         self.backends = self.data.loc[self.widgets["select_test"].value, self.widgets["select_var"].value]\
             .index.get_level_values("vfc_backend").drop_duplicates().tolist()
         self.widgets["select_backend"].options = self.backends
 
         # Reset backend selection if old one is not available in new backends
 
@@ -225,13 +228,11 @@ class CompareRuns:
         # anyway)
         self.update_backend("", "", self.widgets["select_backend"].value)
 
     def update_backend(self, attrname, old, new):
         # Simply update plots, since no other data is affected
         self.update_plots()
 
     def update_n_runs(self, attrname, old, new):
         # Simply update runs selection (value and string display)
         self.select_n_runs.value = new
 
@@ -239,12 +240,9 @@ class CompareRuns:
         self.update_plots()
 
     def update_outliers_filtering(self, attrname, old, new):
         self.update_plots()
 
     # Bokeh setup functions
 
     def setup_plots(self):
 
@@ -256,7 +254,6 @@ class CompareRuns:
         # (defined inside template to avoid bloating server w/ too much JS code)
         js_tap_callback = "goToInspectRuns();"
 
         # Box plot
         self.plots["boxplot"] = figure(
             name="boxplot", title="Variable distribution over runs",
@@ -280,24 +277,23 @@ class CompareRuns:
             ("Number of samples", "@nsamples")
         ]
         box_tooltips_formatters = {
-            "@min" : "printf",
-            "@max" : "printf",
-            "@quantile25" : "printf",
-            "@quantile50" : "printf",
-            "@quantile75" : "printf",
-            "@mu" : "printf"
+            "@min": "printf",
+            "@max": "printf",
+            "@quantile25": "printf",
+            "@quantile50": "printf",
+            "@quantile75": "printf",
+            "@mu": "printf"
         }
 
         plot.fill_boxplot(
             self.plots["boxplot"], self.sources["boxplot_source"],
-            tooltips = box_tooltips,
-            tooltips_formatters = box_tooltips_formatters,
-            js_tap_callback = js_tap_callback,
-            server_tap_callback = self.inspect_run_callback_boxplot,
+            tooltips=box_tooltips,
+            tooltips_formatters=box_tooltips_formatters,
+            js_tap_callback=js_tap_callback,
+            server_tap_callback=self.inspect_run_callback_boxplot,
         )
         self.doc.add_root(self.plots["boxplot"])
 
         # Sigma plot (bar plot)
         self.plots["sigma_plot"] = figure(
             name="sigma_plot", title="Standard deviation σ over runs",
 
@@ -317,14 +313,13 @@ class CompareRuns:
         plot.fill_dotplot(
             self.plots["sigma_plot"], self.sources["sigma_source"], "sigma",
-            tooltips = sigma_tooltips,
-            js_tap_callback = js_tap_callback,
-            server_tap_callback = self.inspect_run_callback_sigma,
-            lines = True
+            tooltips=sigma_tooltips,
+            js_tap_callback=js_tap_callback,
+            server_tap_callback=self.inspect_run_callback_sigma,
+            lines=True
         )
         self.doc.add_root(self.plots["sigma_plot"])
 
         # s plot (bar plot with 2 tabs)
         self.plots["s10_plot"] = figure(
             name="s10_plot", title="Significant digits s over runs",
 
@@ -345,15 +340,14 @@ class CompareRuns:
         plot.fill_dotplot(
             self.plots["s10_plot"], self.sources["s10_source"], "s10",
-            tooltips = s10_tooltips,
-            js_tap_callback = js_tap_callback,
-            server_tap_callback = self.inspect_run_callback_s10,
-            lines = True,
+            tooltips=s10_tooltips,
+            js_tap_callback=js_tap_callback,
+            server_tap_callback=self.inspect_run_callback_s10,
+            lines=True,
             lower_bound=True
         )
         s10_tab = Panel(child=self.plots["s10_plot"], title="Base 10")
 
         self.plots["s2_plot"] = figure(
             name="s2_plot", title="Significant digits s over runs",
             plot_width=900, plot_height=400, x_range=[""],
@@ -373,45 +367,42 @@ class CompareRuns:
         plot.fill_dotplot(
             self.plots["s2_plot"], self.sources["s2_source"], "s2",
-            tooltips = s2_tooltips,
-            js_tap_callback = js_tap_callback,
-            server_tap_callback = self.inspect_run_callback_s2,
-            lines = True,
+            tooltips=s2_tooltips,
+            js_tap_callback=js_tap_callback,
+            server_tap_callback=self.inspect_run_callback_s2,
+            lines=True,
             lower_bound=True
         )
         s2_tab = Panel(child=self.plots["s2_plot"], title="Base 2")
 
         s_tabs = Tabs(
-            name = "s_tabs",
+            name="s_tabs",
             tabs=[s10_tab, s2_tab],
-            tabs_location = "below"
+            tabs_location="below"
        )
         self.doc.add_root(s_tabs)
 
     def setup_widgets(self):
 
         # Initial selections
 
         # Test/var/backend combination (we select all first elements at init)
         self.tests = self.data\
             .index.get_level_values("test").drop_duplicates().tolist()
         self.vars = self.data.loc[self.tests[0]]\
             .index.get_level_values("variable").drop_duplicates().tolist()
         self.backends = self.data.loc[self.tests[0], self.vars[0]]\
             .index.get_level_values("vfc_backend").drop_duplicates().tolist()
 
         # Custom JS callback that will be used client side to filter selections
         filter_callback_js = """
         selector.options = options.filter(e => e.includes(cb_obj.value));
         """
 
         # Test selector widget
 
         # Number of runs to display
         # The dict structure allows us to get int value from the display string
@@ -442,14 +433,16 @@ class CompareRuns:
         self.widgets["test_filter"] = TextInput(
             name="test_filter", title="Tests filter:"
         )
-        self.widgets["test_filter"].js_on_change("value", CustomJS(
-            args=dict(options=self.tests, selector=self.widgets["select_test"]),
-            code=filter_callback_js
-        ))
+        self.widgets["test_filter"].js_on_change(
+            "value",
+            CustomJS(
+                args=dict(
+                    options=self.tests,
+                    selector=self.widgets["select_test"]),
+                code=filter_callback_js))
         self.doc.add_root(self.widgets["test_filter"])
 
         # Number of runs to display
         self.widgets["select_n_runs"] = Select(
             name="select_n_runs", title="Display :",
@@ -458,8 +451,7 @@ class CompareRuns:
         self.doc.add_root(self.widgets["select_n_runs"])
         self.widgets["select_n_runs"].on_change("value", self.update_n_runs)
 
         # Variable selector widget
         self.widgets["select_var"] = Select(
             name="select_var", title="Variable :",
 
@@ -469,8 +461,7 @@ class CompareRuns:
         self.widgets["select_var"].on_change("value", self.update_var)
         self.widgets["select_var"].on_change("options", self.update_var)
 
         # Backend selector widget
         self.widgets["select_backend"] = Select(
             name="select_backend", title="Verificarlo backend :",
 
@@ -479,23 +470,21 @@ class CompareRuns:
         self.doc.add_root(self.widgets["select_backend"])
         self.widgets["select_backend"].on_change("value", self.update_backend)
 
         # Outliers filtering checkbox
         self.widgets["outliers_filtering_compare"] = CheckboxGroup(
             name="outliers_filtering_compare",
-            labels=["Filter outliers"], active =[]
+            labels=["Filter outliers"], active=[]
         )
         self.doc.add_root(self.widgets["outliers_filtering_compare"])
         self.widgets["outliers_filtering_compare"]\
             .on_change("active", self.update_outliers_filtering)
 
     # Communication methods
     # (to send/receive messages to/from master)
 
     # Callback to change view of Inspect runs when data is selected
     def inspect_run_callback(self, new, source_name, x_name):
         # In case we just unselected everything, then do nothing
 
@@ -507,7 +496,6 @@ class CompareRuns:
         self.master.go_to_inspect(run_name)
 
     # Wrappers for each plot (since new is the index of the clicked element,
     # it is dependent of the plot because we could have filtered some outliers)
     # There doesn't seem to be an easy way to add custom parameters to a
 
@@ -525,7 +513,6 @@ class CompareRuns:
     def inspect_run_callback_s10(self, attr, old, new):
         self.inspect_run_callback(new, "s10_source", "s10_x")
 
     # Constructor
     def __init__(self, master, doc, data, metadata):
 
@@ -536,11 +523,10 @@ class CompareRuns:
         self.data = data
         self.metadata = metadata
 
         self.sources = {
             "boxplot_source": ColumnDataSource(data={}),
             "sigma_source": ColumnDataSource(data={}),
-            "s10_source" :ColumnDataSource(data={}),
+            "s10_source": ColumnDataSource(data={}),
             "s2_source": ColumnDataSource(data={})
         }

View File

@@ -10,7 +10,7 @@ import numpy as np
 max_ticks = 15
 max_zscore = 3
 
-################################################################################
+##########################################################################
 
 # From a timestamp, return the associated metadata as a Pandas serie
 
@@ -39,7 +39,6 @@ def get_run_name(timestamp, hash):
     now = calendar.timegm(gmt)
     diff = now - timestamp
 
     # Special case : < 1 minute (return string directly)
     if diff < 60:
         str = "Less than a minute ago"
 
@@ -83,12 +82,10 @@ def get_run_name(timestamp, hash):
         str = str % (n, plural)
 
     # We might want to add the git hash
     if hash != "":
         str = str + " (%s)" % hash
 
     # Finally, check for duplicate with previously generated string
     if str == get_run_name.previous:
         # Increment the duplicate counter and add it to str
 
@@ -96,12 +93,14 @@ def get_run_name(timestamp, hash):
         str = "%s (%s)" % (str, get_run_name.counter)
 
     else:
-        # No duplicate, reset both previously generated str and duplicate counter
+        # No duplicate, reset both previously generated str and duplicate
+        # counter
         get_run_name.counter = 0
         get_run_name.previous = str
 
     return str
 
 # These external variables will store data about the last generated string to
 # avoid duplicates (assuming the runs are sorted by time)
 get_run_name.counter = 0
 
@@ -156,11 +155,16 @@ def remove_boxplot_outliers(dict, outliers, prefix):
     dict["%s_x" % prefix] = remove_outliers(dict["%s_x" % prefix], outliers)
 
-    dict["%s_min" % prefix] = remove_outliers(dict["%s_min" % prefix], outliers)
-    dict["%s_quantile25" % prefix] = remove_outliers(dict["%s_quantile25" % prefix], outliers)
-    dict["%s_quantile50" % prefix] = remove_outliers(dict["%s_quantile50" % prefix], outliers)
-    dict["%s_quantile75" % prefix] = remove_outliers(dict["%s_quantile75" % prefix], outliers)
-    dict["%s_max" % prefix] = remove_outliers(dict["%s_max" % prefix], outliers)
+    dict["%s_min" % prefix] = remove_outliers(
+        dict["%s_min" % prefix], outliers)
+    dict["%s_quantile25" % prefix] = remove_outliers(
+        dict["%s_quantile25" % prefix], outliers)
+    dict["%s_quantile50" % prefix] = remove_outliers(
+        dict["%s_quantile50" % prefix], outliers)
+    dict["%s_quantile75" % prefix] = remove_outliers(
+        dict["%s_quantile75" % prefix], outliers)
+    dict["%s_max" % prefix] = remove_outliers(
+        dict["%s_max" % prefix], outliers)
 
     dict["%s_mu" % prefix] = remove_outliers(dict["%s_mu" % prefix], outliers)
     dict["nsamples"] = remove_outliers(dict["nsamples"], outliers)

View File

@@ -9,19 +9,18 @@ import numpy as np
 from bokeh.plotting import figure, curdoc
 from bokeh.embed import components
 from bokeh.models import Select, ColumnDataSource, Panel, Tabs, HoverTool,\
     RadioButtonGroup, CheckboxGroup, CustomJS
 
 import helper
 import plot
 
-################################################################################
+##########################################################################
 
 class InspectRuns:
     # Helper functions related to InspectRun
 
     # Returns a dictionary mapping user-readable strings to all run timestamps
     def gen_runs_selection(self):
 
@@ -40,7 +39,6 @@ class InspectRuns:
         return runs_dict
 
     def gen_boxplot_tooltips(self, prefix):
         return [
             ("Name", "@%s_x" % prefix),
@@ -55,34 +53,35 @@ class InspectRuns:
     def gen_boxplot_tooltips_formatters(self, prefix):
         return {
-            "@%s_min" % prefix : "printf",
-            "@%s_max" % prefix : "printf",
-            "@%s_quantile25" % prefix : "printf",
-            "@%s_quantile50" % prefix : "printf",
-            "@%s_quantile75" % prefix : "printf",
-            "@%s_mu" % prefix : "printf"
+            "@%s_min" % prefix: "printf",
+            "@%s_max" % prefix: "printf",
+            "@%s_quantile25" % prefix: "printf",
+            "@%s_quantile50" % prefix: "printf",
+            "@%s_quantile75" % prefix: "printf",
+            "@%s_mu" % prefix: "printf"
         }
 
     # Data processing helper
     # (computes new distributions for sigma, s2, s10)
     def data_processing(self, dataframe):
 
         # Compute aggragated mu
-        dataframe["mu"] = np.vectorize(np.average)(dataframe["mu"], weights=dataframe["nsamples"])
+        dataframe["mu"] = np.vectorize(
+            np.average)(
+            dataframe["mu"],
+            weights=dataframe["nsamples"])
 
         # nsamples is the number of aggregated elements (as well as the number
         # of samples for our new sigma and s distributions)
         dataframe["nsamples"] = dataframe["nsamples"].apply(lambda x: len(x))
 
         dataframe["mu_x"] = dataframe.index
         # Make sure that strings don't excede a certain length
         dataframe["mu_x"] = dataframe["mu_x"].apply(
             lambda x: x[:17] + "[...]" + x[-17:] if len(x) > 39 else x
         )
 
         # Get quantiles and mu for sigma, s10, s2
         for prefix in ["sigma", "s10", "s2"]:
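
(Aside: the np.vectorize(np.average) call above computes, row by row, a sample-size-weighted average of the per-run mus. A plain-Python sketch of the same operation on toy lists.)

import numpy as np

# Toy aggregated rows: per-group lists of per-run averages and sample counts
mus = [np.array([1.0, 1.2, 1.1]), np.array([2.0, 2.4])]
nsamples = [np.array([10, 20, 10]), np.array([5, 15])]

aggregated_mu = [np.average(m, weights=w) for m, w in zip(mus, nsamples)]
print(aggregated_mu)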
@@ -91,19 +90,19 @@ class InspectRuns:
             dataframe[prefix] = dataframe[prefix].apply(np.sort)
             dataframe["%s_min" % prefix] = dataframe[prefix].apply(np.min)
-            dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.25,))
-            dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.50,))
-            dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(np.quantile, args=(0.75,))
+            dataframe["%s_quantile25" % prefix] = dataframe[prefix].apply(
+                np.quantile, args=(0.25,))
+            dataframe["%s_quantile50" % prefix] = dataframe[prefix].apply(
+                np.quantile, args=(0.50,))
+            dataframe["%s_quantile75" % prefix] = dataframe[prefix].apply(
+                np.quantile, args=(0.75,))
             dataframe["%s_max" % prefix] = dataframe[prefix].apply(np.max)
             dataframe["%s_mu" % prefix] = dataframe[prefix].apply(np.average)
             del dataframe[prefix]
 
         return dataframe
 
     # Plots update function
     def update_plots(self):
 
@@ -117,8 +116,7 @@ class InspectRuns:
         ]
         filterby = self.factors_dict[filterby_display]
 
         # Groupby and aggregate lines belonging to the same group in lists
         groups = self.run_data[
             self.run_data.index.isin(
@@ -131,32 +129,31 @@ class InspectRuns:
             "sigma": lambda x: x.tolist(),
             "s10": lambda x: x.tolist(),
             "s2": lambda x: x.tolist(),
 
             "mu": lambda x: x.tolist(),
 
             # Used for mu weighted average first, then will be replaced
             "nsamples": lambda x: x.tolist()
         })
 
         # Compute the new distributions, ...
         groups = self.data_processing(groups).to_dict("list")
 
         # Update source
 
         # Assign each ColumnDataSource, starting with the boxplots
         for prefix in ["sigma", "s10", "s2"]:
             dict = {
                 "%s_x" % prefix: groups["%s_x" % prefix],
                 "%s_min" % prefix: groups["%s_min" % prefix],
                 "%s_quantile25" % prefix: groups["%s_quantile25" % prefix],
                 "%s_quantile50" % prefix: groups["%s_quantile50" % prefix],
                 "%s_quantile75" % prefix: groups["%s_quantile75" % prefix],
                 "%s_max" % prefix: groups["%s_max" % prefix],
                 "%s_mu" % prefix: groups["%s_mu" % prefix],
 
                 "nsamples": groups["nsamples"]
             }
 
             # Filter outliers if the box is checked
 
@@ -166,7 +163,8 @@ class InspectRuns:
                 top_outliers = helper.detect_outliers(dict["%s_max" % prefix])
                 helper.remove_boxplot_outliers(dict, top_outliers, prefix)
 
-                bottom_outliers = helper.detect_outliers(dict["%s_min" % prefix])
+                bottom_outliers = helper.detect_outliers(
+                    dict["%s_min" % prefix])
                 helper.remove_boxplot_outliers(dict, bottom_outliers, prefix)
 
             self.sources["%s_source" % prefix].data = dict
 
@@ -185,8 +183,8 @@ class InspectRuns:
         if len(self.widgets["outliers_filtering_inspect"].active) > 0:
             mu_outliers = helper.detect_outliers(groups["mu"])
             groups["mu"] = helper.remove_outliers(groups["mu"], mu_outliers)
-            groups["mu_x"] = helper.remove_outliers(groups["mu_x"], mu_outliers)
+            groups["mu_x"] = helper.remove_outliers(
+                groups["mu_x"], mu_outliers)
 
         # Update plots axis/titles
@ -194,42 +192,38 @@ class InspectRuns:
factors_dict = self.factors_dict.copy() factors_dict = self.factors_dict.copy()
del factors_dict[groupby_display] del factors_dict[groupby_display]
del factors_dict[filterby_display] del factors_dict[filterby_display]
over_all = list(factors_dict.keys())[0] for_all = list(factors_dict.keys())[0]
# Update all display strings for plot title (remove caps, plural) # Update all display strings for plot title (remove caps, plural)
groupby_display = groupby_display.lower() groupby_display = groupby_display.lower()
filterby_display = filterby_display.lower()[:-1] filterby_display = filterby_display.lower()[:-1]
over_all = over_all.lower() for_all = for_all.lower()
self.plots["mu_inspect"].title.text = \ self.plots["mu_inspect"].title.text = \
"Empirical average μ of %s (groupped by %s, for all %s)" \ "Empirical average μ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["sigma_inspect"].title.text = \ self.plots["sigma_inspect"].title.text = \
"Standard deviation σ of %s (groupped by %s, for all %s)" \ "Standard deviation σ of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["s10_inspect"].title.text = \ self.plots["s10_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \ "Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
self.plots["s2_inspect"].title.text = \ self.plots["s2_inspect"].title.text = \
"Significant digits s of %s (groupped by %s, for all %s)" \ "Significant digits s of %s (groupped by %s, for all %s)" \
% (filterby_display, groupby_display, over_all) % (filterby_display, groupby_display, for_all)
# Update x_ranges
helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"]) helper.reset_x_range(self.plots["mu_inspect"], groups["mu_x"])
helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"]) helper.reset_x_range(self.plots["sigma_inspect"], groups["sigma_x"])
helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"]) helper.reset_x_range(self.plots["s10_inspect"], groups["s10_x"])
helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"]) helper.reset_x_range(self.plots["s2_inspect"], groups["s2_x"])
# Widgets' callback functions # Widgets' callback functions
# Run selector callback # Run selector callback
def update_run(self, attrname, old, new): def update_run(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -248,7 +242,7 @@ class InspectRuns:
# Update filter options # Update filter options
options = self.run_data.index\ options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist() .get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options: if old_value not in self.widgets["select_filter"].options:
@ -260,8 +254,8 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# "Group by" radio # "Group by" radio
def update_groupby(self, attrname, old, new): def update_groupby(self, attrname, old, new):
# Update "Filter by" radio list # Update "Filter by" radio list
@ -269,7 +263,6 @@ class InspectRuns:
del filterby_list[self.widgets["groupby_radio"].active] del filterby_list[self.widgets["groupby_radio"].active]
self.widgets["filterby_radio"].labels = filterby_list self.widgets["filterby_radio"].labels = filterby_list
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
self.widgets["filterby_radio"].active self.widgets["filterby_radio"].active
] ]
@ -280,7 +273,7 @@ class InspectRuns:
# Update filter options # Update filter options
options = self.run_data.index\ options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist() .get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options: if old_value not in self.widgets["select_filter"].options:
@ -292,8 +285,8 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# "Filter by" radio # "Filter by" radio
def update_filterby(self, attrname, old, new): def update_filterby(self, attrname, old, new):
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -306,7 +299,7 @@ class InspectRuns:
# Update filter selector options # Update filter selector options
options = self.run_data.index\ options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist() .get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"].options = options self.widgets["select_filter"].options = options
if old_value not in self.widgets["select_filter"].options: if old_value not in self.widgets["select_filter"].options:
@ -318,20 +311,18 @@ class InspectRuns:
# anyway) # anyway)
self.update_filter("", "", old_value) self.update_filter("", "", old_value)
# Filter selector callback # Filter selector callback
def update_filter(self, attrname, old, new): def update_filter(self, attrname, old, new):
self.update_plots() self.update_plots()
# Filter outliers checkbox callback # Filter outliers checkbox callback
def update_outliers_filtering(self, attrname, old, new): def update_outliers_filtering(self, attrname, old, new):
# The status (checked/unchecked) of the checkbox is also verified inside # The status (checked/unchecked) of the checkbox is also verified inside
# self.update_plots(), so calling this function is enough # self.update_plots(), so calling this function is enough
self.update_plots() self.update_plots()
# Bokeh setup functions # Bokeh setup functions
# (for both variable and backend selection at once) # (for both variable and backend selection at once)
@ -339,8 +330,7 @@ class InspectRuns:
tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save" tools = "pan, wheel_zoom, xwheel_zoom, ywheel_zoom, reset, save"
# Tooltips and formatters
# Tooltips and formatters
dotplot_tooltips = [ dotplot_tooltips = [
("Name", "@mu_x"), ("Name", "@mu_x"),
@ -348,20 +338,22 @@ class InspectRuns:
("Number of samples (tests)", "@nsamples") ("Number of samples (tests)", "@nsamples")
] ]
dotplot_formatters = { dotplot_formatters = {
"@mu" : "printf" "@mu": "printf"
} }
sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma") sigma_boxplot_tooltips = self.gen_boxplot_tooltips("sigma")
sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("sigma") sigma_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"sigma")
s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10") s10_boxplot_tooltips = self.gen_boxplot_tooltips("s10")
s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s10") s10_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s10")
s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2") s2_boxplot_tooltips = self.gen_boxplot_tooltips("s2")
s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters("s2") s2_boxplot_tooltips_formatters = self.gen_boxplot_tooltips_formatters(
"s2")
# Plots
# Plots
# Mu plot # Mu plot
self.plots["mu_inspect"] = figure( self.plots["mu_inspect"] = figure(
@ -372,12 +364,11 @@ class InspectRuns:
) )
plot.fill_dotplot( plot.fill_dotplot(
self.plots["mu_inspect"], self.sources["mu_source"], "mu", self.plots["mu_inspect"], self.sources["mu_source"], "mu",
tooltips = dotplot_tooltips, tooltips=dotplot_tooltips,
tooltips_formatters = dotplot_formatters tooltips_formatters=dotplot_formatters
) )
self.doc.add_root(self.plots["mu_inspect"]) self.doc.add_root(self.plots["mu_inspect"])
# Sigma plot # Sigma plot
self.plots["sigma_inspect"] = figure( self.plots["sigma_inspect"] = figure(
name="sigma_inspect", name="sigma_inspect",
@ -386,13 +377,13 @@ class InspectRuns:
tools=tools, sizing_mode="scale_width" tools=tools, sizing_mode="scale_width"
) )
plot.fill_boxplot( plot.fill_boxplot(
self.plots["sigma_inspect"], self.sources["sigma_source"], prefix="sigma", self.plots["sigma_inspect"],
tooltips = sigma_boxplot_tooltips, self.sources["sigma_source"],
tooltips_formatters = sigma_boxplot_tooltips_formatters prefix="sigma",
) tooltips=sigma_boxplot_tooltips,
tooltips_formatters=sigma_boxplot_tooltips_formatters)
self.doc.add_root(self.plots["sigma_inspect"]) self.doc.add_root(self.plots["sigma_inspect"])
# s plots # s plots
self.plots["s10_inspect"] = figure( self.plots["s10_inspect"] = figure(
name="s10_inspect", name="s10_inspect",
@ -401,11 +392,14 @@ class InspectRuns:
tools=tools, sizing_mode='scale_width' tools=tools, sizing_mode='scale_width'
) )
plot.fill_boxplot( plot.fill_boxplot(
self.plots["s10_inspect"], self.sources["s10_source"], prefix="s10", self.plots["s10_inspect"],
tooltips = s10_boxplot_tooltips, self.sources["s10_source"],
tooltips_formatters = s10_boxplot_tooltips_formatters prefix="s10",
) tooltips=s10_boxplot_tooltips,
s10_tab_inspect = Panel(child=self.plots["s10_inspect"], title="Base 10") tooltips_formatters=s10_boxplot_tooltips_formatters)
s10_tab_inspect = Panel(
child=self.plots["s10_inspect"],
title="Base 10")
self.plots["s2_inspect"] = figure( self.plots["s2_inspect"] = figure(
name="s2_inspect", name="s2_inspect",
@ -415,22 +409,20 @@ class InspectRuns:
) )
plot.fill_boxplot( plot.fill_boxplot(
self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2", self.plots["s2_inspect"], self.sources["s2_source"], prefix="s2",
tooltips = s2_boxplot_tooltips, tooltips=s2_boxplot_tooltips,
tooltips_formatters = s2_boxplot_tooltips_formatters tooltips_formatters=s2_boxplot_tooltips_formatters
) )
s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2") s2_tab_inspect = Panel(child=self.plots["s2_inspect"], title="Base 2")
s_tabs_inspect = Tabs( s_tabs_inspect = Tabs(
name = "s_tabs_inspect", name="s_tabs_inspect",
tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location = "below" tabs=[s10_tab_inspect, s2_tab_inspect], tabs_location="below"
) )
self.doc.add_root(s_tabs_inspect) self.doc.add_root(s_tabs_inspect)
def setup_widgets(self): def setup_widgets(self):
# Generation of selectable items # Generation of selectable items
# Dict contains all inspectable runs (maps display strings to timestamps) # Dict contains all inspectable runs (maps display strings to timestamps)
# The dict structure allows getting the timestamp from the display string # The dict structure allows getting the timestamp from the display string
@ -445,8 +437,7 @@ class InspectRuns:
"Tests": "test" "Tests": "test"
} }
# Run selection
# Run selection
# Contains all options strings # Contains all options strings
runs_display = list(self.runs_dict.keys()) runs_display = list(self.runs_dict.keys())
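To make the comment above concrete, self.runs_dict maps a display string to the run timestamp, so selecting an option in the widget is enough to recover the timestamp (values below are hypothetical):

# Hypothetical content of self.runs_dict: display string -> timestamp
runs_dict = {
    "Commit a1b2c3d (2021-05-03 15:47)": 1620049620,
    "Commit 9f8e7d6 (2021-05-02 10:12)": 1619950320,
}
runs_display = list(runs_dict.keys())        # options of the "Run :" Select
current_run = runs_dict[runs_display[-1]]    # timestamp looked up from the label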
@ -457,8 +448,7 @@ class InspectRuns:
# This contains only entries matching the run # This contains only entries matching the run
self.run_data = self.data[self.data["timestamp"] == self.current_run] self.run_data = self.data[self.data["timestamp"] == self.current_run]
change_run_callback_js = "updateRunMetadata(cb_obj.value);"
change_run_callback_js="updateRunMetadata(cb_obj.value);"
self.widgets["select_run"] = Select( self.widgets["select_run"] = Select(
name="select_run", title="Run :", name="select_run", title="Run :",
@ -467,7 +457,7 @@ class InspectRuns:
self.doc.add_root(self.widgets["select_run"]) self.doc.add_root(self.widgets["select_run"])
self.widgets["select_run"].on_change("value", self.update_run) self.widgets["select_run"].on_change("value", self.update_run)
self.widgets["select_run"].js_on_change("value", CustomJS( self.widgets["select_run"].js_on_change("value", CustomJS(
code = change_run_callback_js, code=change_run_callback_js,
args=(dict( args=(dict(
metadata=helper.metadata_to_dict( metadata=helper.metadata_to_dict(
helper.get_metadata(self.metadata, self.current_run) helper.get_metadata(self.metadata, self.current_run)
@ -475,8 +465,7 @@ class InspectRuns:
)) ))
)) ))
# Factors selection
# Factors selection
# "Group by" radio # "Group by" radio
self.widgets["groupby_radio"] = RadioButtonGroup( self.widgets["groupby_radio"] = RadioButtonGroup(
@ -491,7 +480,6 @@ class InspectRuns:
self.update_groupby self.update_groupby
) )
# "Filter by" radio # "Filter by" radio
# Get all possible factors, and remove the one selected in "Group by" # Get all possible factors, and remove the one selected in "Group by"
filterby_list = list(self.factors_dict.keys()) filterby_list = list(self.factors_dict.keys())
@ -509,7 +497,6 @@ class InspectRuns:
self.update_filterby self.update_filterby
) )
# Filter selector # Filter selector
filterby = self.widgets["filterby_radio"].labels[ filterby = self.widgets["filterby_radio"].labels[
@ -518,7 +505,7 @@ class InspectRuns:
filterby = self.factors_dict[filterby] filterby = self.factors_dict[filterby]
options = self.run_data.index\ options = self.run_data.index\
.get_level_values(filterby).drop_duplicates().tolist() .get_level_values(filterby).drop_duplicates().tolist()
self.widgets["select_filter"] = Select( self.widgets["select_filter"] = Select(
# We need a different name to avoid collision in the template with # We need a different name to avoid collision in the template with
@ -528,30 +515,26 @@ class InspectRuns:
) )
self.doc.add_root(self.widgets["select_filter"]) self.doc.add_root(self.widgets["select_filter"])
self.widgets["select_filter"]\ self.widgets["select_filter"]\
.on_change("value", self.update_filter) .on_change("value", self.update_filter)
# Toggle for outliers filtering
# Toggle for outliers filtering
self.widgets["outliers_filtering_inspect"] = CheckboxGroup( self.widgets["outliers_filtering_inspect"] = CheckboxGroup(
name="outliers_filtering_inspect", name="outliers_filtering_inspect",
labels=["Filter outliers"], active = [] labels=["Filter outliers"], active=[]
) )
self.doc.add_root(self.widgets["outliers_filtering_inspect"]) self.doc.add_root(self.widgets["outliers_filtering_inspect"])
self.widgets["outliers_filtering_inspect"]\ self.widgets["outliers_filtering_inspect"]\
.on_change("active", self.update_outliers_filtering) .on_change("active", self.update_outliers_filtering)
# Communication methods # Communication methods
# (to send/receive messages to/from master) # (to send/receive messages to/from master)
# When received, switch to the run_name passed as a parameter # When received, switch to the run_name passed as a parameter
def switch_view(self, run_name): def switch_view(self, run_name):
self.widgets["select_run"].value = run_name self.widgets["select_run"].value = run_name
# Constructor # Constructor
def __init__(self, master, doc, data, metadata): def __init__(self, master, doc, data, metadata):
@ -562,11 +545,10 @@ class InspectRuns:
self.data = data self.data = data
self.metadata = metadata self.metadata = metadata
self.sources = { self.sources = {
"mu_source": ColumnDataSource(data={}), "mu_source": ColumnDataSource(data={}),
"sigma_source": ColumnDataSource(data={}), "sigma_source": ColumnDataSource(data={}),
"s10_source" :ColumnDataSource(data={}), "s10_source": ColumnDataSource(data={}),
"s2_source": ColumnDataSource(data={}) "s2_source": ColumnDataSource(data={})
} }
View File
@ -1,5 +1,5 @@
# Look for and read all the run files in the current directory (ending with # Look for and read all the run files in the current directory (ending with
# .vfcrun.hd5), and lanch a Bokeh server for the visualization of this data. # .vfcrun.h5), and launch a Bokeh server for the visualization of this data.
import os import os
import sys import sys
@ -14,18 +14,16 @@ import compare_runs
import inspect_runs import inspect_runs
import helper import helper
################################################################################ ##########################################################################
# Read vfcrun files, and aggregate them in one dataset
# Read vfcrun files, and aggregate them in one dataset run_files = [f for f in os.listdir(".") if f.endswith(".vfcrun.h5")]
run_files = [ f for f in os.listdir(".") if f.endswith(".vfcrun.hd5") ]
if len(run_files) == 0: if len(run_files) == 0:
print( print(
"Warning [vfc_ci]: Could not find any vfcrun files in the directory. " \ "Warning [vfc_ci]: Could not find any vfcrun files in the directory. "
"This will result in server errors and prevent you from viewing the report." "This will result in server errors and prevent you from viewing the report.")
)
# These are arrays of Pandas dataframes for now # These are arrays of Pandas dataframes for now
metadata = [] metadata = []
@ -55,15 +53,14 @@ metadata["date"] = metadata.index.to_series().map(
) )
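The actual loading loop is elided by the hunks above; a minimal sketch of how the per-run HDF5 files could be read and concatenated with pandas (the HDF keys "data" and "metadata" are assumptions):

import pandas as pd

data_frames, metadata_frames = [], []
for run_file in run_files:
    # Each .vfcrun.h5 file is assumed to hold one "data" and one "metadata" table
    data_frames.append(pd.read_hdf(run_file, "data"))
    metadata_frames.append(pd.read_hdf(run_file, "metadata"))

data = pd.concat(data_frames, sort=False)
metadata = pd.concat(metadata_frames, sort=False)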
################################################################################ ##########################################################################
curdoc().title = "Verificarlo Report" curdoc().title = "Verificarlo Report"
# Read server arguments
# Read server arguments # (this is quite easy because Bokeh server is called through a wrapper, so
# (this is quite easy because Bokeh server is called through a wrapper, so # we know exactly what the arguments might be)
# we know exactly what the arguments might be)
git_repo_linked = False git_repo_linked = False
commit_link = "" commit_link = ""
@ -83,7 +80,6 @@ for i in range(1, len(sys.argv)):
address = sys.argv[i + 2] address = sys.argv[i + 2]
url = "" url = ""
# Here, address is either the remote URL or the path to the local Git # Here, address is either the remote URL or the path to the local Git
# repo (depending on the method) # repo (depending on the method)
@ -99,12 +95,11 @@ for i in range(1, len(sys.argv)):
else: else:
raise ValueError( raise ValueError(
"Error [vfc_ci]: The specified method to get the Git " \ "Error [vfc_ci]: The specified method to get the Git "
"repository is invalid. Are you calling Bokeh directly " \ "repository is invalid. Are you calling Bokeh directly "
"instead of using the Verificarlo wrapper ?" "instead of using the Verificarlo wrapper ?"
) )
# At this point, "url" should be set correctly, we can get the repo's # At this point, "url" should be set correctly, we can get the repo's
# URL and name, after making sure we're on a Git URL # URL and name, after making sure we're on a Git URL
@ -113,7 +108,7 @@ for i in range(1, len(sys.argv)):
path = parsed_url.path.split("/") path = parsed_url.path.split("/")
if len(path) < 3: if len(path) < 3:
raise ValueError( raise ValueError(
"Error [vfc_ci]: The found URL doesn't seem to be pointing " \ "Error [vfc_ci]: The found URL doesn't seem to be pointing "
"to a Git repository (path is too short)" "to a Git repository (path is too short)"
) )
@ -122,12 +117,11 @@ for i in range(1, len(sys.argv)):
curdoc().template_variables["repo_url"] = url curdoc().template_variables["repo_url"] = url
curdoc().template_variables["repo_name"] = repo_name curdoc().template_variables["repo_name"] = repo_name
# We should have a "github.com" or a "*gitlab*" URL # We should have a "github.com" or a "*gitlab*" URL
if parsed_url.netloc == "github.com": if parsed_url.netloc == "github.com":
commit_link = "https://%s%s/commit/" \ commit_link = "https://%s%s/commit/" \
% (parsed_url.netloc, parsed_url.path) % (parsed_url.netloc, parsed_url.path)
curdoc().template_variables["commit_link"] = commit_link curdoc().template_variables["commit_link"] = commit_link
curdoc().template_variables["git_host"] = "GitHub" curdoc().template_variables["git_host"] = "GitHub"
@ -138,7 +132,7 @@ for i in range(1, len(sys.argv)):
# We assume we have a GitLab URL # We assume we have a GitLab URL
else: else:
commit_link = "https://%s%s/-/commit/" \ commit_link = "https://%s%s/-/commit/" \
% (parsed_url.netloc, parsed_url.path) % (parsed_url.netloc, parsed_url.path)
curdoc().template_variables["commit_link"] = commit_link curdoc().template_variables["commit_link"] = commit_link
curdoc().template_variables["git_host"] = "GitLab" curdoc().template_variables["git_host"] = "GitLab"
@ -148,8 +142,6 @@ for i in range(1, len(sys.argv)):
git_repo_linked = True git_repo_linked = True
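As a standalone illustration of the branching above (the remote URL is hypothetical), urlparse splits the address into the network location and path from which the commit link is rebuilt:

from urllib.parse import urlparse

url = "https://github.com/example-org/example-repo"   # hypothetical remote
parsed_url = urlparse(url)

if parsed_url.netloc == "github.com":
    commit_link = "https://%s%s/commit/" % (parsed_url.netloc, parsed_url.path)
else:
    # Anything else is assumed to be a GitLab instance ("/-/commit/" scheme)
    commit_link = "https://%s%s/-/commit/" % (parsed_url.netloc, parsed_url.path)

# Appending a commit hash gives the final link used in the report
print(commit_link + "abcdef0")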
# Look for a logo URL # Look for a logo URL
# If a logo URL is specified, it will be included in the report's header # If a logo URL is specified, it will be included in the report's header
if sys.argv[i] == "logo": if sys.argv[i] == "logo":
@ -162,10 +154,9 @@ curdoc().template_variables["git_repo_linked"] = git_repo_linked
curdoc().template_variables["has_logo"] = has_logo curdoc().template_variables["has_logo"] = has_logo
################################################################################ ##########################################################################
# Setup report views
# Setup report views
# Define a ViewsMaster class to allow two-way communication between views. # Define a ViewsMaster class to allow two-way communication between views.
# This approach by classes allows us to have separate scopes for each view and # This approach by classes allows us to have separate scopes for each view and
@ -174,13 +165,12 @@ curdoc().template_variables["has_logo"] = has_logo
class ViewsMaster: class ViewsMaster:
# Communication functions # Communication functions
def go_to_inspect(self, run_name): def go_to_inspect(self, run_name):
self.inspect.switch_view(run_name) self.inspect.switch_view(run_name)
# Constructor
#Constructor
def __init__(self, data, metadata, git_repo_linked, commit_link): def __init__(self, data, metadata, git_repo_linked, commit_link):
@ -190,28 +180,29 @@ class ViewsMaster:
self.commit_link = commit_link self.commit_link = commit_link
# Pass metadata to the template as a JSON string # Pass metadata to the template as a JSON string
curdoc().template_variables["metadata"] = self.metadata.to_json(orient="index") curdoc().template_variables["metadata"] = self.metadata.to_json(
orient="index")
# Runs comparison # Runs comparison
self.compare = compare_runs.CompareRuns( self.compare = compare_runs.CompareRuns(
master = self, master=self,
doc = curdoc(), doc=curdoc(),
data = data, data=data,
metadata = metadata, metadata=metadata,
) )
# Runs inspection # Runs inspection
self.inspect = inspect_runs.InspectRuns( self.inspect = inspect_runs.InspectRuns(
master = self, master=self,
doc = curdoc(), doc=curdoc(),
data = data, data=data,
metadata = metadata, metadata=metadata,
) )
views_master = ViewsMaster( views_master = ViewsMaster(
data = data, data=data,
metadata = metadata, metadata=metadata,
git_repo_linked = git_repo_linked, git_repo_linked=git_repo_linked,
commit_link = commit_link commit_link=commit_link
) )
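Stripped of the Bokeh specifics, the two-way communication pattern wired up here boils down to the following (only go_to_inspect and switch_view come from the diff; the stub bodies are simplified):

class InspectRunsStub:
    def __init__(self, master):
        self.master = master          # a view can reach the master...

    def switch_view(self, run_name):
        print("Now inspecting", run_name)

class ViewsMasterStub:
    def __init__(self):
        self.inspect = InspectRunsStub(master=self)   # ...and the master owns the views

    def go_to_inspect(self, run_name):
        self.inspect.switch_view(run_name)

master = ViewsMasterStub()
master.go_to_inspect("my run")   # e.g. triggered by a tap callback in another view

Keeping every view behind a single master object is what lets one view (the comparison view) hand control over to another (the inspection view) without the two importing each other directly.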
View File
@ -15,21 +15,19 @@ def fill_dotplot(
): ):
# (Optional) Tooltip and tooltip formatters # (Optional) Tooltip and tooltip formatters
if tooltips != None: if tooltips is not None:
hover = HoverTool(tooltips = tooltips, mode="vline", names=["circle"]) hover = HoverTool(tooltips=tooltips, mode="vline", names=["circle"])
if tooltips_formatters != None: if tooltips_formatters is not None:
hover.formatters = tooltips_formatters hover.formatters = tooltips_formatters
plot.add_tools(hover) plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback) # (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None: if js_tap_callback is not None:
tap = TapTool(callback=CustomJS(code=js_tap_callback)) tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap) plot.add_tools(tap)
# (Optional) Add segment to represent a lower bound # (Optional) Add segment to represent a lower bound
if lower_bound: if lower_bound:
lower_segment = plot.segment( lower_segment = plot.segment(
@ -38,24 +36,20 @@ def fill_dotplot(
source=source, line_color="black" source=source, line_color="black"
) )
# Draw dots (actually Bokeh circles) # Draw dots (actually Bokeh circles)
circle = plot.circle( circle = plot.circle(
name="circle", name="circle",
x="%s_x" % data_field, y=data_field, source=source, size=12 x="%s_x" % data_field, y=data_field, source=source, size=12
) )
# (Optional) Draw lines between dots # (Optional) Draw lines between dots
if lines: if lines:
line = plot.line(x="%s_x" % data_field, y=data_field, source=source) line = plot.line(x="%s_x" % data_field, y=data_field, source=source)
# (Optional) Add server tap callback # (Optional) Add server tap callback
if server_tap_callback != None: if server_tap_callback is not None:
circle.data_source.selected.on_change("indices", server_tap_callback) circle.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance # Plot appearance
plot.xgrid.grid_line_color = None plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None plot.ygrid.grid_line_color = None
@ -64,33 +58,30 @@ def fill_dotplot(
plot.yaxis[0].formatter.power_limit_low = 0 plot.yaxis[0].formatter.power_limit_low = 0
plot.yaxis[0].formatter.precision = 3 plot.yaxis[0].formatter.precision = 3
plot.xaxis[0].major_label_orientation = pi/8 plot.xaxis[0].major_label_orientation = pi / 8
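A minimal way to call fill_dotplot with the reformatted keyword arguments (values and figure options are placeholders; the source must provide both the data field and its "<field>_x" counterpart):

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

source = ColumnDataSource(data={
    "mu_x": ["run A", "run B"],   # categorical x values
    "mu": [1.25, 1.31],           # values plotted on the y axis
})

p = figure(x_range=["run A", "run B"], sizing_mode="scale_width")
fill_dotplot(
    p, source, "mu",
    tooltips=[("Name", "@mu_x"), ("μ", "@mu{%0.3e}")],
    tooltips_formatters={"@mu": "printf"},
)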
def fill_boxplot( def fill_boxplot(
plot, source, plot, source,
prefix="", prefix="",
tooltips=None, tooltips_formatters=None, tooltips=None, tooltips_formatters=None,
js_tap_callback=None, server_tap_callback=None, js_tap_callback=None, server_tap_callback=None
): ):
# (Optional) Tooltip and tooltip formatters # (Optional) Tooltip and tooltip formatters
if tooltips != None: if tooltips is not None:
hover = HoverTool(tooltips = tooltips, mode="vline", names=["full_box"]) hover = HoverTool(tooltips=tooltips, mode="vline", names=["full_box"])
if tooltips_formatters != None: if tooltips_formatters is not None:
hover.formatters = tooltips_formatters hover.formatters = tooltips_formatters
plot.add_tools(hover) plot.add_tools(hover)
# (Optional) Add TapTool (for JS tap callback) # (Optional) Add TapTool (for JS tap callback)
if js_tap_callback != None: if js_tap_callback is not None:
tap = TapTool(callback=CustomJS(code=js_tap_callback)) tap = TapTool(callback=CustomJS(code=js_tap_callback))
plot.add_tools(tap) plot.add_tools(tap)
# Draw boxes (the prefix argument modifies the fields of ColumnDataSource # Draw boxes (the prefix argument modifies the fields of ColumnDataSource
# that are used) # that are used)
@ -128,18 +119,18 @@ def fill_boxplot(
color="black" color="black"
) )
# (Optional) Add server tap callback # (Optional) Add server tap callback
if server_tap_callback != None: if server_tap_callback is not None:
top_stem.data_source.selected.on_change("indices", server_tap_callback) top_stem.data_source.selected.on_change("indices", server_tap_callback)
bottom_stem.data_source.selected.on_change("indices", server_tap_callback) bottom_stem.data_source.selected.on_change(
"indices", server_tap_callback)
full_box.data_source.selected.on_change("indices", server_tap_callback) full_box.data_source.selected.on_change("indices", server_tap_callback)
bottom_box.data_source.selected.on_change("indices", server_tap_callback) bottom_box.data_source.selected.on_change(
"indices", server_tap_callback)
mu_dot.data_source.selected.on_change("indices", server_tap_callback) mu_dot.data_source.selected.on_change("indices", server_tap_callback)
# Plot appearance # Plot appearance
plot.xgrid.grid_line_color = None plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None plot.ygrid.grid_line_color = None
@ -148,4 +139,4 @@ def fill_boxplot(
plot.yaxis[0].formatter.power_limit_low = 0 plot.yaxis[0].formatter.power_limit_low = 0
plot.yaxis[0].formatter.precision = 3 plot.yaxis[0].formatter.precision = 3
plot.xaxis[0].major_label_orientation = pi/8 plot.xaxis[0].major_label_orientation = pi / 8
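Regarding the prefix mechanism mentioned in the comments above: when prefix="sigma", fill_boxplot reads prefixed fields from the ColumnDataSource. Only the fields below appear in this diff; the full set (for instance the quartile columns used for the boxes) depends on the function body and is not guaranteed here:

from bokeh.models import ColumnDataSource

prefix = "sigma"
source = ColumnDataSource(data={
    "%s_x" % prefix: ["group A", "group B"],   # x categories
    "%s_min" % prefix: [0.10, 0.20],
    "%s_max" % prefix: [0.90, 1.10],
    "%s_mu" % prefix: [0.50, 0.60],
    "nsamples": [30, 30],                      # shared field, not prefixed
    # ...plus whatever quartile fields the boxes above rely on
})
# fill_boxplot(p, source, prefix="sigma", tooltips=..., tooltips_formatters=...)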
View File
@ -291,19 +291,19 @@
<!-- PLOTS --> <!-- PLOTS -->
<div class="column is-9"> <div class="column is-9">
<h3 class="title is-3">Plots</h3> <h3 class="title is-3">Plots</h3>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 3;">
{{ embed(roots.s_tabs_inspect) }} {{ embed(roots.s_tabs_inspect) }}
</div> </div>
<br> <br>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 2;">
{{ embed(roots.sigma_inspect) }} {{ embed(roots.sigma_inspect) }}
</div> </div>
<br> <br>
<div class="card plot-card"> <div class="card plot-card" style="z-index: 1;">
{{ embed(roots.mu_inspect) }} {{ embed(roots.mu_inspect) }}
</div> </div>
</div> </div>
@ -325,6 +325,8 @@
</div> </div>
<!-- JAVASCRIPT -->
<script> <script>
// Listen to clicks on breadcrumb (for responsive header) // Listen to clicks on breadcrumb (for responsive header)
View File
@ -29,7 +29,7 @@ run_verificarlo_tests:
- git fetch --all - git fetch --all
- git checkout -b {{ci_branch}} origin/{{ci_branch}} - git checkout -b {{ci_branch}} origin/{{ci_branch}}
- mkdir -p vfcruns - mkdir -p vfcruns
- mv *.vfcrun.hd5 vfcruns - mv *.vfcrun.h5 vfcruns
- git add vfcruns/* - git add vfcruns/*
- git commit -m "[auto] New test results for commit ${git_hash}" - git commit -m "[auto] New test results for commit ${git_hash}"
- git push - git push
@ -39,4 +39,4 @@ run_verificarlo_tests:
artifacts: artifacts:
paths: paths:
- "*.vfcraw.hd5" - "*.vfcraw.h5"
View File
@ -26,7 +26,7 @@ jobs:
- name: Install Python requirements - name: Install Python requirements
run: | run: |
pip install numpy scipy pandas bokeh jinja2 tables GitPython pip install numpy scipy pandas bokeh jinja2 tables GitPython
apt update
apt install wget apt install wget
wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages wget https://raw.githubusercontent.com/verificarlo/significantdigits/main/sigdigits.py -P /usr/local/lib/python3.8/dist-packages
@ -45,7 +45,7 @@ jobs:
git checkout {{ci_branch}} git checkout {{ci_branch}}
mkdir -p vfcruns mkdir -p vfcruns
mv *.vfcrun.hd5 vfcruns mv *.vfcrun.h5 vfcruns
git add vfcruns/* git add vfcruns/*
git commit -m "[auto] New test results for commit ${git_hash}" git commit -m "[auto] New test results for commit ${git_hash}"
git push git push
@ -54,4 +54,4 @@ jobs:
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
with: with:
{% raw %}name: ${{github.sha}}.vfcraw{% endraw %} {% raw %}name: ${{github.sha}}.vfcraw{% endraw %}
path: ./*.vfcraw.hd5 path: ./*.vfcraw.h5
vfc_ci
View File
@ -10,9 +10,10 @@
import argparse import argparse
################################################################################ ##########################################################################
# Parameters validation helpers
# Parameters validation helpers
def is_port(string): def is_port(string):
value = int(string) value = int(string)
@ -31,8 +32,7 @@ def is_directory(string):
return string return string
################################################################################ ##########################################################################
# Subcommand decorator # Subcommand decorator
@ -41,6 +41,7 @@ cli = argparse.ArgumentParser(
) )
subparsers = cli.add_subparsers(dest="subcommand") subparsers = cli.add_subparsers(dest="subcommand")
def subcommand(description="", args=[], parent=subparsers): def subcommand(description="", args=[], parent=subparsers):
def decorator(func): def decorator(func):
parser = parent.add_parser(func.__name__, description=description) parser = parent.add_parser(func.__name__, description=description)
@ -54,15 +55,13 @@ def argument(*name_or_flags, **kwargs):
return ([*name_or_flags], kwargs) return ([*name_or_flags], kwargs)
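For readers less familiar with this argparse idiom, here is a condensed, self-contained sketch of the subcommand/argument decorator pattern used here (simplified; not the exact vfc_ci implementation):

import argparse

cli = argparse.ArgumentParser(description="demo")
subparsers = cli.add_subparsers(dest="subcommand")

def argument(*name_or_flags, **kwargs):
    return ([*name_or_flags], kwargs)

def subcommand(description="", args=[], parent=subparsers):
    def decorator(func):
        # One sub-parser per decorated function, named after the function
        parser = parent.add_parser(func.__name__, description=description)
        for arg in args:
            parser.add_argument(*arg[0], **arg[1])
        parser.set_defaults(func=func)
        return func
    return decorator

@subcommand(
    description="Say hello.",
    args=[argument("--name", default="world", help="Who to greet")]
)
def hello(args):
    print("Hello, %s" % args.name)

if __name__ == "__main__":
    parsed = cli.parse_args()
    if parsed.subcommand is None:
        cli.print_help()
    else:
        parsed.func(parsed)

The decorator keeps each subcommand's arguments next to the function that implements it, which is why the "setup", "test" and "serve" definitions below read as mostly declarative blocks.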
##########################################################################
################################################################################
# "setup" subcommand # "setup" subcommand
@subcommand( @subcommand(
description="Create an automated workflow to execute Verificarlo tests.", description="Create an automated workflow to execute Verificarlo tests.",
args = [ args=[
argument( argument(
"git_host", "git_host",
help=""" help="""
@ -76,13 +75,12 @@ def setup(args):
import ci.setup import ci.setup
ci.setup.setup(args.git_host) ci.setup.setup(args.git_host)
# "test" subcommand # "test" subcommand
@subcommand( @subcommand(
description="Execute predefined Verificarlo tests and save their results.", description="Execute predefined Verificarlo tests and save their results.",
args = [ args=[
argument( argument(
"-g", "--is-git-commit", "-g", "--is-git-commit",
help=""" help="""
@ -112,15 +110,14 @@ def test(args):
import ci.test import ci.test
ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run) ci.test.run(args.is_git_commit, args.export_raw_results, args.dry_run)
# "serve" subcommand # "serve" subcommand
@subcommand( @subcommand(
description=""" description="""
Start a server to visualize Verificarlo test results. Start a server to visualize Verificarlo test results.
""", """,
args = [ args=[
argument( argument(
"-s", "--show", "-s", "--show",
help=""" help="""
@ -174,9 +171,9 @@ def test(args):
def serve(args): def serve(args):
# git_directory and git_url are supposed to be exclusive # git_directory and git_url are supposed to be exclusive
if args.git_directory != None and args.git_url != None: if args.git_directory is not None and args.git_url is not None:
raise argparse.ArgumentTypeError( raise argparse.ArgumentTypeError(
"\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "\ "\"-gd\" / \"--git-directory\" and \"-gu\" / \"--git-url\" are "
"mutually exclusive. Please make sure to use at most one of them." "mutually exclusive. Please make sure to use at most one of them."
) )
@ -191,12 +188,9 @@ def serve(args):
) )
############################################################################### ###############################################################################
# Main command group and entry point # Main command group and entry point
if __name__ == "__main__": if __name__ == "__main__":
args = cli.parse_args() args = cli.parse_args()
if args.subcommand is None: if args.subcommand is None: