in libmozdata/patchanalysis.py [0:0]
def parse_uplift_comment(text, bug_id=None):
    """
    Parse a raw uplift-request comment and render it as HTML.

    Recognized section headers (e.g. "[User impact if declined]:") become
    <h1> elements; the text under each header becomes a <div>, with
    http(s) links, "bug NNN", "attachment NNN" and (when *bug_id* is
    given) "comment NNN" references turned into Bugzilla links.
    Sections whose content indicates risk get an extra "risky" CSS class.

    :param text: raw comment text from Bugzilla.
    :param bug_id: optional bug number used to resolve "comment NNN"
                   references; when None, comment references are left as-is.
    :return: the rendered HTML string.
    """
    headers = (
        r"Feature/regressing bug #",
        r"Feature/Bug causing the regression",
        r"User impact if declined",
        r"Is this code covered by automated tests\?",
        r"Has the fix been verified in Nightly\?",
        r"Describe test coverage new/current, TreeHerder",
        r"Needs manual test from QE\? If yes, steps to reproduce",
        r"List of other uplifts needed for the feature/fix",
        r"Risks and why",
        r"Is the change risky\?",
        r"Why is the change risky/not risky\?",
        r"String/UUID change made/needed",
        r"String changes made/needed",
    )
    no_header = "no-header"

    def _replace_link(pattern, link, output, line):
        # Replace every occurrence of `pattern` in `line` with an anchor tag.
        replacement = '<a href="{}" target="_blank">{}</a>'.format(link, output)
        return re.sub(pattern, replacement, line, flags=re.IGNORECASE)

    def _parse_line(h, v):
        """Linkify one line `v` and append it to the section for header `h`."""
        # Detect initial http links first
        v = _replace_link(r"(https?://[\w\.\/_@#-]*)", r"\1", r"\1", v)
        # Bug XXX goes to bugzilla
        v = _replace_link(r"bug (\d+)", r"{}/\1".format(Bugzilla.URL), r"Bug \1", v)
        # Attachment XXX goes to bugzilla
        v = _replace_link(
            r"attachment (\d+)",
            r"{}/attachment.cgi?id=\1&action=edit".format(Bugzilla.URL),
            r"Attachment \1",
            v,
        )
        # Comment XXX goes to bugzilla (needs the enclosing bug id)
        if bug_id is not None:
            v = _replace_link(
                r"comment (\d+)",
                r"{}/show_bug.cgi?id={}#c\1".format(Bugzilla.URL, bug_id),
                r"Comment \1",
                v,
            )
        # Add to output structure
        if h == no_header:
            key = no_header
        else:
            # Build clean key from the first three words of the header,
            # e.g. "Risks and why" -> "risks-and-why"
            parts = re.sub(r"[^\w]+", " ", h.lower()).split(" ")[:3]
            key = "-".join(parts)
        if key not in out:
            out[key] = {"title": h, "lines": [], "risky": False}
        if v != "":
            out[key]["lines"].append(v)

    # Escape HTML special characters so raw comment text cannot inject
    # markup into the generated HTML. (Done before linkification so the
    # anchor tags we add below are not themselves escaped.)
    html_escape_table = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
        ">": "&gt;",
        "<": "&lt;",
    }
    text = "".join(html_escape_table.get(c, c) for c in text)
    lines = text.split("\n")

    # Build one case-insensitive regex matching any known header,
    # capturing the header and any text following it on the same line.
    header_regex = r"^\[({})\]:?\s*(.*)".format("|".join(headers))
    header_regex = re.compile(header_regex, re.IGNORECASE)
    out = collections.OrderedDict()

    # Detect headers; lines before the first header fall under `no_header`
    header = no_header
    for line in lines:
        match = header_regex.match(line)
        if match:
            # Start a new section at this header
            header, post_header = match.groups()
            _parse_line(header, post_header)
        else:
            # Continuation of the current section
            _parse_line(header, line)

    def _cleanup_lines(lines):
        # Collapse a section's lines to lowercase words for comparison
        text = re.sub(r"[^\w]+", " ", "".join(lines))
        return text.lower().strip()

    # Detect risks on specific items
    if "risks-and-why" in out:
        # If risk is tagged as "medium" or "high"
        cleaned = _cleanup_lines(out["risks-and-why"]["lines"])
        out["risks-and-why"]["risky"] = cleaned in ("medium", "high")
    if "string-uuid-change" in out:
        # If the "string/UUID change" is set to anything but "No or None or N/A".
        cleaned = _cleanup_lines(out["string-uuid-change"]["lines"])
        out["string-uuid-change"]["risky"] = cleaned not in ("no", "none", "n a")
    if "describe-test-coverage" in out:
        # If test coverage question is empty or No or N/A
        cleaned = _cleanup_lines(out["describe-test-coverage"]["lines"])
        out["describe-test-coverage"]["risky"] = cleaned in ("", "no", "none", "n a")

    # Build complete html output: an <h1> per named section followed by a
    # <div> of its lines; the no-header section gets only the <div>.
    html = ""
    for key, p in out.items():
        css_classes = [key]
        if p["risky"]:
            css_classes.append("risky")
        if key != no_header:
            html += '<h1 class="{}">{}</h1>'.format(" ".join(css_classes), p["title"])
        html += '<div class="{}">{}</div>'.format(
            " ".join(css_classes), "<br />".join(p["lines"])
        )
    return html