in server/src/main/java/org/apache/cassandra/sidecar/db/RestoreJobDatabaseAccessor.java [116:172]
public RestoreJob update(UpdateRestoreJobRequestPayload payload, UUID jobId)
throws DataObjectMappingException
{
    sidecarSchema.ensureInitialized();
    RestoreJob.Builder updateBuilder = RestoreJob.builder();
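    // Derive the partition key date from the job id; every statement bound below
    // targets that same (createdAt, jobId) row of the restore job table.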
    LocalDate createdAt = RestoreJob.toLocalDate(jobId);
    updateBuilder.createdAt(createdAt)
                 .jobId(jobId);
    RestoreJobSecrets secrets = payload.secrets();
    RestoreJobStatus status = payload.status();
    String jobAgent = payload.jobAgent();
    Date expireAt = payload.expireAtAsDate();
    Long sliceCount = payload.sliceCount();
    // All updates target the same partition, so an unlogged batch is used explicitly;
    // Cassandra internally combines the statements into a single mutation.
    BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
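    // Each field present in the payload adds one statement to the batch and is
    // mirrored onto the builder, so the returned RestoreJob reflects the update.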
    ByteBuffer wrappedSecrets;
    if (secrets != null)
    {
        try
        {
            byte[] secretBytes = MAPPER.writeValueAsBytes(secrets);
            wrappedSecrets = ByteBuffer.wrap(secretBytes);
            batchStatement.add(tableSchema.updateBlobSecrets()
                                          .bind(createdAt, jobId, wrappedSecrets));
        }
        catch (JsonProcessingException e)
        {
            throw new DataObjectMappingException("Failed to serialize secrets", e);
        }
        updateBuilder.jobSecrets(secrets);
    }
    if (status != null)
    {
        batchStatement.add(tableSchema.updateStatus().bind(createdAt, jobId, status.name()));
        updateBuilder.jobStatus(status);
    }
    if (jobAgent != null)
    {
        batchStatement.add(tableSchema.updateJobAgent().bind(createdAt, jobId, jobAgent));
        updateBuilder.jobAgent(jobAgent);
    }
    if (expireAt != null)
    {
        batchStatement.add(tableSchema.updateExpireAt().bind(createdAt, jobId, expireAt));
        updateBuilder.expireAt(expireAt);
    }
    if (sliceCount != null)
    {
        batchStatement.add(tableSchema.updateSliceCount().bind(createdAt, jobId, sliceCount));
        updateBuilder.sliceCount(sliceCount);
    }
    execute(batchStatement);
    return updateBuilder.build();
}
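
For reference, below is a minimal, self-contained sketch of the single-partition unlogged-batch pattern the method above relies on, written against the DataStax Java driver 3.x API (the same API family that provides BatchStatement.Type.UNLOGGED). The contact point, keyspace, table, and column names are hypothetical and do not reflect the Sidecar schema; the point is that every bound statement carries the same partition key, so the unlogged batch is applied as one mutation on the replica.

// Hypothetical schema assumed by this sketch (not the Sidecar table):
//   CREATE TABLE demo_ks.demo_jobs (created_at date, job_id timeuuid,
//                                    status text, job_agent text,
//                                    PRIMARY KEY ((created_at), job_id));
import java.util.UUID;

import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.utils.UUIDs;

public class SinglePartitionBatchSketch
{
    public static void main(String[] args) throws Exception
    {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("demo_ks"))
        {
            PreparedStatement updateStatus =
                session.prepare("UPDATE demo_jobs SET status = ? WHERE created_at = ? AND job_id = ?");
            PreparedStatement updateAgent =
                session.prepare("UPDATE demo_jobs SET job_agent = ? WHERE created_at = ? AND job_id = ?");

            // Derive the partition key date from a time-based UUID, mirroring how the
            // accessor derives createdAt from jobId.
            UUID jobId = UUIDs.timeBased();
            LocalDate createdAt = LocalDate.fromMillisSinceEpoch(UUIDs.unixTimestamp(jobId));

            // Every statement binds the same (created_at, job_id), so the unlogged
            // batch collapses into a single mutation instead of separate writes.
            BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
            batch.add(updateStatus.bind("STAGE_READY", createdAt, jobId));
            batch.add(updateAgent.bind("agent-1", createdAt, jobId));
            session.execute(batch);
        }
    }
}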