in bot/code_review_bot/workflow.py [0:0]
def find_issues(self, revision, group_id):
    """
    Find all issues on a remote Taskcluster task group.

    Loads every task in the group, publishes the revision to the backend,
    then walks the decision task's dependencies, parsing each one for
    issues, improvement patches and notices.

    Args:
        revision: the revision under analysis; improvement patches found
            in analysis tasks are attached to it via add_improvement_patch,
            and it is published to the backend.
        group_id: Taskcluster task group identifier to inspect.

    Returns:
        A 4-tuple ``(issues, task_failures, notices, reviewers)``:
        - issues: list of issues parsed from all analysis tasks
        - task_failures: tasks in "failed" state that produced no issue
          nor patch (and are not configured as ignorable)
        - notices: notices built from NoticeTask dependencies
        - reviewers: extra reviewers groups from the last processed task,
          or [] when not applicable

    Raises:
        AssertionError: when the group cannot be listed, the revision
            cannot be published, or the decision task has no dependencies.
        Exception: any failure during a dependency's analysis is logged
            then re-raised.
    """
    # Load all tasks in task group, indexed by task id
    tasks = self.queue_service.listTaskGroup(group_id)
    assert "tasks" in tasks
    tasks = {task["status"]["taskId"]: task for task in tasks["tasks"]}
    assert len(tasks) > 0
    logger.info("Loaded Taskcluster group", id=group_id, tasks=len(tasks))

    # Store the revision in the backend (or retrieve an existing one)
    rev = self.backend_api.publish_revision(revision)
    assert (
        rev is not None
    ), "Stopping early because revision could not be created nor retrieved from the backend"

    # Load the decision task description for the current try push
    task = tasks.get(settings.try_task_id)
    assert task is not None, f"Missing task {settings.try_task_id}"
    dependencies = task["task"]["dependencies"]
    assert len(dependencies) > 0, "No task dependencies to analyze"

    # Skip dependencies not in group
    # But log all skipped tasks
    def _in_group(dep_id):
        if dep_id not in tasks:
            # Used for docker images produced in tree
            # and other artifacts
            logger.info("Skip dependency not in group", task_id=dep_id)
            return False
        return True

    dependencies = [dep_id for dep_id in dependencies if _in_group(dep_id)]

    # Do not run parsers when we only have a gecko decision task
    # That means no analyzer were triggered by the taskgraph decision task
    # This can happen if the patch only touches file types for which we have no analyzer defined
    # See issue https://github.com/mozilla/release-services/issues/2055
    if len(dependencies) == 1:
        task = tasks[dependencies[0]]
        if task["task"]["metadata"]["name"] == "Gecko Decision Task":
            # logger.warn is a deprecated alias — use warning, consistent
            # with the other calls in this method
            logger.warning("Only dependency is a Decision Task, skipping analysis")
            return [], [], [], []

    # Add zero-coverage task (class sentinel, handled below via issubclass)
    if self.zero_coverage_enabled:
        dependencies.append(ZeroCoverageTask)

    # Find issues and patches in dependencies
    issues = []
    task_failures = []
    notices = []
    for dep in dependencies:
        try:
            if isinstance(dep, type) and issubclass(dep, AnalysisTask):
                # Build a class instance from its definition and route
                task = dep.build_from_route(self.index_service, self.queue_service)
            else:
                # Use a task from its id & description
                task = self.build_task(tasks[dep])
            if task is None:
                continue
            artifacts = task.load_artifacts(self.queue_service)
            if artifacts is not None:
                task_issues, task_patches = [], []
                if isinstance(task, AnalysisTask):
                    task_issues = task.parse_issues(artifacts, revision)
                    logger.info(
                        f"Found {len(task_issues)} issues",
                        task=task.name,
                        id=task.id,
                    )
                    stats.report_task(task, task_issues)
                    issues += task_issues

                    task_patches = task.build_patches(artifacts)
                    for patch in task_patches:
                        revision.add_improvement_patch(task, patch)
                elif isinstance(task, NoticeTask):
                    notice = task.build_notice(artifacts, revision)
                    if notice:
                        notices.append(notice)

                # Report a problem when tasks in erroneous state are found
                # but no issue or patch has been processed by the bot
                if task.state == "failed" and not task_issues and not task_patches:
                    # Skip task that are listed as ignorable (we try to avoid unnecessary spam)
                    if task.name in self.task_failures_ignored:
                        logger.warning(
                            "Ignoring task failure as configured",
                            task=task.name,
                            id=task.id,
                        )
                        continue

                    logger.warning(
                        "An erroneous task processed some artifacts and found no issues or patches",
                        task=task.name,
                        id=task.id,
                    )
                    task_failures.append(task)
        except Exception as e:
            # Log (with the bot's own task id for traceability) then re-raise:
            # a dependency failure aborts the whole analysis
            logger.warning(
                "Failure during task analysis",
                task=settings.taskcluster.task_id,
                error=e,
            )
            raise

    # NOTE(review): `task` here is whatever the loop last assigned — the
    # reviewers come from the final processed dependency only. Looks
    # intentional but worth confirming against callers.
    reviewers = (
        task.extra_reviewers_groups if task and isinstance(task, BaseTask) else []
    )

    return issues, task_failures, notices, reviewers