Produce a dashboard website after running tests (#152)

Val Lorentz 2022-04-10 10:40:39 +02:00 committed by GitHub
parent 3083aeeb24
commit edf3e5904b
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
10 changed files with 653 additions and 94 deletions

.github/deploy_to_netlify.py (vendored executable file, 99 lines added)

@@ -0,0 +1,99 @@
#!/usr/bin/env python3

import json
import os
import re
import subprocess
import urllib.request

with open(os.environ["GITHUB_EVENT_PATH"]) as fd:
    github_event = json.load(fd)

context_suffix = ""

command = ["netlify", "deploy", "--dir=dashboard/"]

if "pull_request" in github_event and "number" in github_event:
    pr_number = github_event["number"]
    sha = github_event["after"]
    # Aliases can't exceed 37 chars
    command.extend(["--alias", f"pr-{pr_number}-{sha[0:10]}"])
    context_suffix = " (pull_request)"
else:
    ref = github_event["ref"]
    m = re.match("refs/heads/(.*)", ref)
    if m:
        branch = m.group(1)
        sha = github_event["head_commit"]["id"]
        command.extend(["--alias", f"br-{branch[0:23]}-{sha[0:10]}"])
        if branch in ("main", "master"):
            command.extend(["--prod"])
        else:
            context_suffix = " (push)"
    else:
        # TODO
        pass

proc = subprocess.run(command, capture_output=True)

output = proc.stdout.decode()
assert proc.returncode == 0, (output, proc.stderr.decode())

m = re.search("https://[^ ]*--[^ ]*netlify.app", output)
assert m
netlify_site_url = m.group(0)
target_url = f"{netlify_site_url}/index.xhtml"


def send_status() -> None:
    statuses_url = github_event["repository"]["statuses_url"].format(sha=sha)
    payload = {
        "state": "success",
        "context": f"Dashboard{context_suffix}",
        "description": "Table of all test results",
        "target_url": target_url,
    }
    request = urllib.request.Request(
        statuses_url,
        data=json.dumps(payload).encode(),
        headers={
            "Authorization": f'token {os.environ["GITHUB_TOKEN"]}',
            "Content-Type": "text/json",
            "Accept": "application/vnd.github+json",
        },
    )
    response = urllib.request.urlopen(request)
    assert response.status == 201, response.read()


send_status()


def send_pr_comment() -> None:
    comments_url = github_event["pull_request"]["_links"]["comments"]["href"]
    payload = {
        "body": f"[Test results]({target_url})",
    }
    request = urllib.request.Request(
        comments_url,
        data=json.dumps(payload).encode(),
        headers={
            "Authorization": f'token {os.environ["GITHUB_TOKEN"]}',
            "Content-Type": "text/json",
            "Accept": "application/vnd.github+json",
        },
    )
    response = urllib.request.urlopen(request)
    assert response.status == 201, response.read()


if "pull_request" in github_event:
    send_pr_comment()
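
For reference, the script only touches a few fields of the webhook payload that GitHub writes to GITHUB_EVENT_PATH. A minimal sketch of the two shapes it expects, with field names taken from the reads above and values made up for illustration:

    # Hypothetical pull_request event, reduced to what deploy_to_netlify.py reads.
    pull_request_event = {
        "number": 152,  # PR number, used in the "pr-<number>-<sha>" Netlify alias
        "after": "0123456789abcdef0123456789abcdef01234567",  # head commit sha
        "pull_request": {
            "_links": {
                "comments": {
                    "href": "https://api.github.com/repos/OWNER/REPO/issues/152/comments"
                }
            }
        },
        "repository": {
            "statuses_url": "https://api.github.com/repos/OWNER/REPO/statuses/{sha}"
        },
    }

    # Hypothetical push event: "ref" and "head_commit" take the place of the PR fields.
    push_event = {
        "ref": "refs/heads/main",
        "head_commit": {"id": "0123456789abcdef0123456789abcdef01234567"},
        "repository": {
            "statuses_url": "https://api.github.com/repos/OWNER/REPO/statuses/{sha}"
        },
    }

Besides GITHUB_EVENT_PATH and GITHUB_TOKEN, the netlify CLI invoked here also needs NETLIFY_AUTH_TOKEN and NETLIFY_SITE_ID in the environment, which the workflows below provide.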

View File

@@ -369,7 +369,7 @@ jobs:
        retention-days: 1
  publish-test-results:
    if: success() || failure()
    name: Publish Unit Tests Results
    name: Publish Dashboard
    needs:
    - test-bahamut
    - test-bahamut-anope
@@ -397,27 +397,23 @@ jobs:
      uses: actions/download-artifact@v2
      with:
        path: artifacts
    - if: github.event_name == 'pull_request'
      name: Publish Unit Test Results
      uses: actions/github-script@v4
      with:
        result-encoding: string
        script: |
          let body = '';
          const options = {};
          options.listeners = {
            stdout: (data) => {
              body += data.toString();
            }
          };
          await exec.exec('bash', ['-c', 'shopt -s globstar; python3 report.py artifacts/**/*.xml'], options);
          github.issues.createComment({
            issue_number: context.issue.number,
            owner: context.repo.owner,
            repo: context.repo.repo,
            body: body,
          });
          return body;
    - name: Install dashboard dependencies
      run: |-
        python -m pip install --upgrade pip
        pip install defusedxml
    - name: Generate dashboard
      run: |-
        shopt -s globstar
        python3 -m irctest.dashboard.format dashboard/ artifacts/**/*.xml
        echo '/ /index.xhtml' > dashboard/_redirects
    - name: Install netlify-cli
      run: npm i -g netlify-cli
    - env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
        NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
      name: Deploy to Netlify
      run: ./.github/deploy_to_netlify.py
  test-bahamut:
    needs:
    - build-bahamut

View File

@@ -71,7 +71,7 @@ jobs:
        retention-days: 1
  publish-test-results:
    if: success() || failure()
    name: Publish Unit Tests Results
    name: Publish Dashboard
    needs:
    - test-inspircd
    - test-inspircd-anope
@@ -83,27 +83,23 @@ jobs:
      uses: actions/download-artifact@v2
      with:
        path: artifacts
    - if: github.event_name == 'pull_request'
      name: Publish Unit Test Results
      uses: actions/github-script@v4
      with:
        result-encoding: string
        script: |
          let body = '';
          const options = {};
          options.listeners = {
            stdout: (data) => {
              body += data.toString();
            }
          };
          await exec.exec('bash', ['-c', 'shopt -s globstar; python3 report.py artifacts/**/*.xml'], options);
          github.issues.createComment({
            issue_number: context.issue.number,
            owner: context.repo.owner,
            repo: context.repo.repo,
            body: body,
          });
          return body;
    - name: Install dashboard dependencies
      run: |-
        python -m pip install --upgrade pip
        pip install defusedxml
    - name: Generate dashboard
      run: |-
        shopt -s globstar
        python3 -m irctest.dashboard.format dashboard/ artifacts/**/*.xml
        echo '/ /index.xhtml' > dashboard/_redirects
    - name: Install netlify-cli
      run: npm i -g netlify-cli
    - env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
        NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
      name: Deploy to Netlify
      run: ./.github/deploy_to_netlify.py
  test-inspircd:
    needs:
    - build-inspircd

View File

@@ -409,7 +409,7 @@ jobs:
        retention-days: 1
  publish-test-results:
    if: success() || failure()
    name: Publish Unit Tests Results
    name: Publish Dashboard
    needs:
    - test-bahamut
    - test-bahamut-anope
@@ -440,27 +440,23 @@ jobs:
      uses: actions/download-artifact@v2
      with:
        path: artifacts
    - if: github.event_name == 'pull_request'
      name: Publish Unit Test Results
      uses: actions/github-script@v4
      with:
        result-encoding: string
        script: |
          let body = '';
          const options = {};
          options.listeners = {
            stdout: (data) => {
              body += data.toString();
            }
          };
          await exec.exec('bash', ['-c', 'shopt -s globstar; python3 report.py artifacts/**/*.xml'], options);
          github.issues.createComment({
            issue_number: context.issue.number,
            owner: context.repo.owner,
            repo: context.repo.repo,
            body: body,
          });
          return body;
    - name: Install dashboard dependencies
      run: |-
        python -m pip install --upgrade pip
        pip install defusedxml
    - name: Generate dashboard
      run: |-
        shopt -s globstar
        python3 -m irctest.dashboard.format dashboard/ artifacts/**/*.xml
        echo '/ /index.xhtml' > dashboard/_redirects
    - name: Install netlify-cli
      run: npm i -g netlify-cli
    - env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
        NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
      name: Deploy to Netlify
      run: ./.github/deploy_to_netlify.py
  test-bahamut:
    needs:
    - build-bahamut

irctest/dashboard/format.py (new file, 322 lines added)

@@ -0,0 +1,322 @@
import base64
import dataclasses
import gzip
import hashlib
from pathlib import Path
import re
import sys
from typing import (
    IO,
    Callable,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
    TypeVar,
)
import xml.dom.minidom
import xml.etree.ElementTree as ET

from defusedxml.ElementTree import parse as parse_xml

NETLIFY_CHAR_BLACKLIST = frozenset('":<>|*?\r\n#')
"""Characters not allowed in output filenames"""


@dataclasses.dataclass
class CaseResult:
    module_name: str
    class_name: str
    test_name: str
    job: str
    success: bool
    skipped: bool
    system_out: Optional[str]
    details: Optional[str] = None
    type: Optional[str] = None
    message: Optional[str] = None

    def output_filename(self):
        test_name = self.test_name
        if len(test_name) > 50 or set(test_name) & NETLIFY_CHAR_BLACKLIST:
            # File name too long or otherwise invalid. This should be good enough:
            m = re.match(r"(?P<function_name>\w+?)\[(?P<params>.+)\]", test_name)
            assert m, "File name is too long but has no parameter."
            test_name = f'{m.group("function_name")}[{md5sum(m.group("params"))}]'
        return f"{self.job}_{self.module_name}.{self.class_name}.{test_name}.txt"


TK = TypeVar("TK")
TV = TypeVar("TV")


def md5sum(text: str) -> str:
    return base64.urlsafe_b64encode(hashlib.md5(text.encode()).digest()).decode()


def group_by(list_: Iterable[TV], key: Callable[[TV], TK]) -> Dict[TK, List[TV]]:
    groups: Dict[TK, List[TV]] = {}
    for value in list_:
        groups.setdefault(key(value), []).append(value)
    return groups


def iter_job_results(job_file_name: Path, job: ET.ElementTree) -> Iterator[CaseResult]:
    (suite,) = job.getroot()
    for case in suite:
        if "name" not in case.attrib:
            continue

        success = True
        skipped = False
        details = None
        system_out = None
        extra = {}
        for child in case:
            if child.tag == "skipped":
                success = True
                skipped = True
                details = None
                extra = child.attrib
            elif child.tag in ("failure", "error"):
                success = False
                skipped = False
                details = child.text
                extra = child.attrib
            elif child.tag == "system-out":
                assert (
                    system_out is None
                    # for some reason, skipped tests have two system-out;
                    # and the second one contains test teardown
                    or child.text.startswith(system_out.rstrip())
                ), ("Duplicate system-out tag", repr(system_out), repr(child.text))
                system_out = child.text
            else:
                assert False, child

        (module_name, class_name) = case.attrib["classname"].rsplit(".", 1)

        m = re.match(
            r"(.*/)?pytest[ -]results[ _](?P<name>.*)"
            r"[ _][(]?(stable|release|devel|devel_release)[)]?/pytest.xml(.gz)?",
            str(job_file_name),
        )
        assert m, job_file_name

        yield CaseResult(
            module_name=module_name,
            class_name=class_name,
            test_name=case.attrib["name"],
            job=m.group("name"),
            success=success,
            skipped=skipped,
            details=details,
            system_out=system_out,
            **extra,
        )


def build_module_html(
    jobs: List[str], results: List[CaseResult], module_name: str
) -> ET.Element:
    root = ET.Element("html")
    head = ET.SubElement(root, "head")
    ET.SubElement(head, "title").text = module_name
    ET.SubElement(head, "link", rel="stylesheet", type="text/css", href="./style.css")

    body = ET.SubElement(root, "body")

    ET.SubElement(body, "h1").text = module_name

    results_by_class = group_by(results, lambda r: r.class_name)

    table = ET.SubElement(body, "table")
    table.set("class", "test-matrix")

    job_row = ET.Element("tr")
    ET.SubElement(job_row, "th")  # column of case name
    for job in jobs:
        cell = ET.SubElement(job_row, "th")
        ET.SubElement(ET.SubElement(cell, "div"), "span").text = job
        cell.set("class", "job-name")

    for (class_name, class_results) in results_by_class.items():
        # Header row: class name
        header_row = ET.SubElement(table, "tr")
        th = ET.SubElement(header_row, "th", colspan=str(len(jobs) + 1))
        row_anchor = f"{class_name}"
        section_header = ET.SubElement(
            ET.SubElement(th, "h2"),
            "a",
            href=f"#{row_anchor}",
            id=row_anchor,
        )
        section_header.text = class_name

        # Header row: one column for each implementation
        table.append(job_row)

        # One row for each test:
        results_by_test = group_by(class_results, key=lambda r: r.test_name)
        for (test_name, test_results) in results_by_test.items():
            row_anchor = f"{class_name}.{test_name}"
            if len(row_anchor) >= 50:
                # Too long; give up on generating readable URL
                # TODO: only hash test parameter
                row_anchor = md5sum(row_anchor)

            row = ET.SubElement(table, "tr", id=row_anchor)

            cell = ET.SubElement(row, "th")
            cell.set("class", "test-name")
            cell_link = ET.SubElement(cell, "a", href=f"#{row_anchor}")
            cell_link.text = test_name

            results_by_job = group_by(test_results, key=lambda r: r.job)
            for job_name in jobs:
                cell = ET.SubElement(row, "td")
                try:
                    (result,) = results_by_job[job_name]
                except KeyError:
                    cell.set("class", "deselected")
                    cell.text = "d"
                    continue

                text: Optional[str]

                if result.skipped:
                    cell.set("class", "skipped")
                    if result.type == "pytest.skip":
                        text = "s"
                    else:
                        text = result.type
                elif result.success:
                    cell.set("class", "success")
                    if result.type:
                        # dead code?
                        text = result.type
                    else:
                        text = "."
                else:
                    cell.set("class", "failure")
                    if result.type:
                        # dead code?
                        text = result.type
                    else:
                        text = "f"

                if result.system_out:
                    # There is a log file; link to it.
                    a = ET.SubElement(cell, "a", href=f"./{result.output_filename()}")
                    a.text = text or "?"
                else:
                    cell.text = text or "?"

    # Hacky: ET expects the namespace to be present in every tag we create instead;
    # but it would be excessively verbose.
    root.set("xmlns", "http://www.w3.org/1999/xhtml")

    return root


def write_html_pages(
    output_dir: Path, results: List[CaseResult]
) -> List[Tuple[str, str]]:
    """Returns the list of (module_name, file_name)."""
    output_dir.mkdir(parents=True, exist_ok=True)
    results_by_module = group_by(results, lambda r: r.module_name)

    # used as columns
    jobs = list(sorted({r.job for r in results}))

    pages = []

    for (module_name, module_results) in results_by_module.items():
        root = build_module_html(jobs, module_results, module_name)
        file_name = f"{module_name}.xhtml"
        write_xml_file(output_dir / file_name, root)
        pages.append((module_name, file_name))

    return pages


def write_test_outputs(output_dir: Path, results: List[CaseResult]) -> None:
    """Writes stdout files of each test."""
    for result in results:
        if result.system_out is None:
            continue
        output_file = output_dir / result.output_filename()
        with output_file.open("wt") as fd:
            fd.write(result.system_out)


def write_html_index(output_dir: Path, pages: List[Tuple[str, str]]) -> None:
    root = ET.Element("html")
    head = ET.SubElement(root, "head")
    ET.SubElement(head, "title").text = "irctest dashboard"
    ET.SubElement(head, "link", rel="stylesheet", type="text/css", href="./style.css")

    body = ET.SubElement(root, "body")

    ET.SubElement(body, "h1").text = "irctest dashboard"

    ul = ET.SubElement(body, "ul")

    for (module_name, file_name) in sorted(pages):
        link = ET.SubElement(ET.SubElement(ul, "li"), "a", href=f"./{file_name}")
        link.text = module_name

    root.set("xmlns", "http://www.w3.org/1999/xhtml")

    write_xml_file(output_dir / "index.xhtml", root)


def write_assets(output_dir: Path) -> None:
    css_path = output_dir / "style.css"
    source_css_path = Path(__file__).parent / "style.css"
    with css_path.open("wt") as fd:
        with source_css_path.open() as source_fd:
            fd.write(source_fd.read())


def write_xml_file(filename: Path, root: ET.Element) -> None:
    # Serialize
    s = ET.tostring(root)

    # Prettify
    s = xml.dom.minidom.parseString(s).toprettyxml(indent=" ")

    with filename.open("wt") as fd:
        fd.write(s)  # type: ignore


def parse_xml_file(filename: Path) -> ET.ElementTree:
    fd: IO
    if filename.suffix == ".gz":
        with gzip.open(filename, "rb") as fd:  # type: ignore
            return parse_xml(fd)  # type: ignore
    else:
        with open(filename) as fd:
            return parse_xml(fd)  # type: ignore


def main(output_path: Path, filenames: List[Path]) -> int:
    results = [
        result
        for filename in filenames
        for result in iter_job_results(filename, parse_xml_file(filename))
    ]

    pages = write_html_pages(output_path, results)
    write_html_index(output_path, pages)
    write_test_outputs(output_path, results)
    write_assets(output_path)

    return 0


if __name__ == "__main__":
    (_, output_path, *filenames) = sys.argv
    exit(main(Path(output_path), list(map(Path, filenames))))
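
As a reading aid: iter_job_results expects a <testsuites> root wrapping a single <testsuite>, and derives the job name from the artifact path rather than from the XML itself. A rough sketch of an input it would accept, inferred from the parsing code above rather than captured from a real run (test and job names are illustrative), assuming this module's names are in scope:

    import io
    from pathlib import Path

    EXAMPLE_PYTEST_XML = """\
    <testsuites>
      <testsuite name="pytest">
        <testcase classname="irctest.server_tests.kick.KickTestCase" name="testKick">
          <system-out>captured client/server traffic goes here</system-out>
        </testcase>
        <testcase classname="irctest.server_tests.kick.KickTestCase" name="testKickPrivileges">
          <skipped type="pytest.skip" message="not supported by this server"/>
          <system-out>teardown output</system-out>
        </testcase>
      </testsuite>
    </testsuites>
    """

    # The job name ("solanum") comes from the directory name, which has to match
    # the "pytest results <name> (<flavor>)" pattern checked in iter_job_results.
    results = list(
        iter_job_results(
            Path("artifacts/pytest results solanum (devel)/pytest.xml"),
            parse_xml(io.StringIO(EXAMPLE_PYTEST_XML)),
        )
    )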

View File

@@ -0,0 +1,86 @@
import dataclasses
import gzip
import io
import json
from pathlib import Path
import sys
from typing import Iterator
import urllib.parse
import urllib.request
import zipfile


@dataclasses.dataclass
class Artifact:
    repo: str
    run_id: int
    name: str
    download_url: str

    @property
    def public_download_url(self):
        # GitHub API is not available publicly for artifacts, we need to use
        # a third-party proxy to access it...
        name = urllib.parse.quote(self.name)
        return f"https://nightly.link/{self.repo}/actions/runs/{self.run_id}/{name}.zip"


def iter_run_artifacts(repo: str, run_id: int) -> Iterator[Artifact]:
    request = urllib.request.Request(
        f"https://api.github.com/repos/{repo}/actions/runs/{run_id}/artifacts",
        headers={"Accept": "application/vnd.github.v3+json"},
    )

    response = urllib.request.urlopen(request)

    for artifact in json.load(response)["artifacts"]:
        if not artifact["name"].startswith(("pytest_results_", "pytest results ")):
            continue
        if artifact["expired"]:
            continue
        yield Artifact(
            repo=repo,
            run_id=run_id,
            name=artifact["name"],
            download_url=artifact["archive_download_url"],
        )


def download_artifact(output_name: Path, url: str) -> None:
    if output_name.exists():
        return
    response = urllib.request.urlopen(url)
    archive_bytes = response.read()  # Can't stream it, it's a ZIP
    with zipfile.ZipFile(io.BytesIO(archive_bytes)) as archive:
        with archive.open("pytest.xml") as input_fd:
            pytest_xml = input_fd.read()
    tmp_output_path = output_name.with_suffix(".tmp")
    with gzip.open(tmp_output_path, "wb") as output_fd:
        output_fd.write(pytest_xml)

    # Atomically write to the output path, so that we don't write partial files in case
    # the download process is interrupted
    tmp_output_path.rename(output_name)


def main(output_dir: Path, repo: str, run_id: int) -> int:
    output_dir.mkdir(parents=True, exist_ok=True)
    run_path = output_dir / str(run_id)
    run_path.mkdir(exist_ok=True)

    for artifact in iter_run_artifacts(repo, run_id):
        artifact_path = run_path / artifact.name / "pytest.xml.gz"
        artifact_path.parent.mkdir(exist_ok=True)
        try:
            download_artifact(artifact_path, artifact.download_url)
        except Exception:
            download_artifact(artifact_path, artifact.public_download_url)
        print("downloaded", artifact.name)

    return 0


if __name__ == "__main__":
    (_, output_path, repo, run_id) = sys.argv
    exit(main(Path(output_path), repo, int(run_id)))
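
A possible way to use this downloader together with the formatter above, assuming the file is importable (its module path is not shown in this diff) and with OWNER/REPO and the run id filled in with real values:

    from pathlib import Path

    # 1. Fetch the pytest.xml artifacts of one workflow run; they are stored
    #    gzipped, e.g. dashboard_data/<run_id>/<artifact name>/pytest.xml.gz
    main(Path("dashboard_data"), "OWNER/REPO", 123456789)

    # 2. The .gz files can then be fed to the formatter, whose path regex and
    #    parse_xml_file both accept the .xml.gz suffix:
    #    python3 -m irctest.dashboard.format dashboard/ dashboard_data/**/*.xml.gz

Equivalently from the command line, given how sys.argv is unpacked above: python3 <this file> dashboard_data OWNER/REPO 123456789.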

View File

@@ -0,0 +1,60 @@
@media (prefers-color-scheme: dark) {
    body {
        background-color: #121212;
        color: rgba(255, 255, 255, 0.87);
    }
    a {
        filter: invert(0.85) hue-rotate(180deg);
    }
}

/* Only 1px solid border between cells */
table.test-matrix {
    border-spacing: 0;
    border-collapse: collapse;
}
table.test-matrix td {
    text-align: center;
    border: 1px solid grey;
}

/* Make link take the whole cell */
table.test-matrix td a {
    display: block;
    margin: 0;
    padding: 0;
    width: 100%;
    height: 100%;
    color: black;
    text-decoration: none;
}

/* Test matrix colors */
table.test-matrix .deselected {
    background-color: grey;
}
table.test-matrix .success {
    background-color: green;
}
table.test-matrix .skipped {
    background-color: yellow;
}
table.test-matrix .failure {
    background-color: red;
}

/* Rotate headers, thanks to https://css-tricks.com/rotated-table-column-headers/ */
th.job-name {
    height: 140px;
    white-space: nowrap;
}
th.job-name > div {
    transform:
        translate(28px, 50px)
        rotate(315deg);
    width: 40px;
}
th.job-name > div > span {
    border-bottom: 1px solid grey;
    padding-left: 0px;
}

View File

@@ -10,7 +10,6 @@ and keep them in sync.
import enum
import pathlib
import textwrap
import yaml
@@ -351,7 +350,7 @@ def generate_workflow(config: dict, version_flavor: VersionFlavor):
    jobs[f"test-{test_id}"] = test_job

    jobs["publish-test-results"] = {
        "name": "Publish Unit Tests Results",
        "name": "Publish Dashboard",
        "needs": sorted({f"test-{test_id}" for test_id in config["tests"]} & set(jobs)),
        "runs-on": "ubuntu-latest",
        # the build-and-test job might be skipped, we don't need to run
@@ -365,32 +364,31 @@ def generate_workflow(config: dict, version_flavor: VersionFlavor):
                "with": {"path": "artifacts"},
            },
            {
                "name": "Publish Unit Test Results",
                "uses": "actions/github-script@v4",
                "if": "github.event_name == 'pull_request'",
                "with": {
                    "result-encoding": "string",
                    "script": script(
                        textwrap.dedent(
                            """\
                            let body = '';
                            const options = {};
                            options.listeners = {
                              stdout: (data) => {
                                body += data.toString();
                              }
                            };
                            await exec.exec('bash', ['-c', 'shopt -s globstar; python3 report.py artifacts/**/*.xml'], options);
                            github.issues.createComment({
                              issue_number: context.issue.number,
                              owner: context.repo.owner,
                              repo: context.repo.repo,
                              body: body,
                            });
                            return body;
                            """
                        )
                    ),
                "name": "Install dashboard dependencies",
                "run": script(
                    "python -m pip install --upgrade pip",
                    "pip install defusedxml",
                ),
            },
            {
                "name": "Generate dashboard",
                "run": script(
                    "shopt -s globstar",
                    "python3 -m irctest.dashboard.format dashboard/ artifacts/**/*.xml",
                    "echo '/ /index.xhtml' > dashboard/_redirects",
                ),
            },
            {
                "name": "Install netlify-cli",
                "run": "npm i -g netlify-cli",
            },
            {
                "name": "Deploy to Netlify",
                "run": "./.github/deploy_to_netlify.py",
                "env": {
                    "NETLIFY_SITE_ID": "${{ secrets.NETLIFY_SITE_ID }}",
                    "NETLIFY_AUTH_TOKEN": "${{ secrets.NETLIFY_AUTH_TOKEN }}",
                    "GITHUB_TOKEN": "${{ secrets.GITHUB_TOKEN }}",
                },
            },
        ],
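
The script() helper used above is not part of this hunk; judging from the two call styles (a list of shell lines, and a single dedented block), it presumably just joins its arguments into the multi-line string that ends up as the step's run: block, along the lines of:

    # Assumed shape of the existing helper, for readers of this diff only;
    # the real definition lives elsewhere in make_workflows.py.
    def script(*lines: str) -> str:
        return "\n".join(lines)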

View File

@@ -12,6 +12,9 @@ disallow_untyped_defs = False
[mypy-irctest.client_tests.*]
disallow_untyped_defs = False
[mypy-defusedxml.*]
ignore_missing_imports = True
[mypy-ecdsa]
ignore_missing_imports = True

View File

@@ -39,3 +39,6 @@ markers =
WHOX
python_classes = *TestCase Test*
# Include stdout in pytest.xml files used by the dashboard.
junit_logging = system-out