in spark-job/src/main/java/org/apache/cassandra/diff/JobMetadataDb.java [471:490]
/**
 * Best-effort removal of {@code jobId} from the running-jobs table via a
 * conditional (LWT) DELETE. Failures are logged but never thrown, because this
 * is invoked from exception handlers where the original exception must not be
 * masked by a secondary cleanup failure.
 *
 * @param jobId identifier of the job to mark as no longer running
 */
public void markNotRunning(UUID jobId) {
try
{
    logger.info("Marking job {} as not running", jobId);
    ResultSet rs = session.execute(String.format("DELETE FROM %s.%s WHERE job_id = ? IF EXISTS",
                                                 metadataKeyspace, Schema.RUNNING_JOBS),
                                   jobId);
    // A conditional DELETE always returns a row containing the [applied] flag
    if (!rs.one().getBool("[applied]"))
    {
        // Fixed placeholder mismatch: the original message mixed a printf-style %s
        // with SLF4J {} markers (3 slots, 2 args), yielding a garbled log line.
        logger.warn("Non-fatal: Unable to mark job {} as not running, check logs for errors " +
                    "during initialization as there may be no entry for this job in the {} table",
                    jobId, Schema.RUNNING_JOBS);
    }
} catch (Exception e) {
    // Because this is called from another exception handler, we don't want to lose the original exception
    // just because we may not have been able to mark the job as not running. Just log here
    logger.error("Could not mark job {} as not running.", jobId, e);
}
}