in flink-kubernetes-operator/src/main/java/org/apache/flink/kubernetes/operator/service/FlinkService.java [57:142]
/**
 * Submits an application cluster for the given job specification.
 *
 * @param jobSpec job specification to deploy
 * @param conf effective Flink configuration for the deployment
 * @param requireHaMetadata whether HA metadata must be present for this submission
 *     — NOTE(review): exact precondition semantics depend on the implementation; confirm there
 * @throws Exception if the cluster submission fails
 */
void submitApplicationCluster(JobSpec jobSpec, Configuration conf, boolean requireHaMetadata)
throws Exception;

/**
 * Checks whether HA metadata is available for the cluster described by the configuration.
 *
 * @param conf effective Flink configuration
 * @return true if HA metadata is available
 */
boolean isHaMetadataAvailable(Configuration conf);

/**
 * Submits an empty session cluster (no job) using the given configuration.
 *
 * @param conf effective Flink configuration for the session cluster
 * @throws Exception if the cluster submission fails
 */
void submitSessionCluster(Configuration conf) throws Exception;
/**
 * Submits a job to an existing session cluster.
 *
 * @param meta Kubernetes object metadata of the session job resource
 * @param spec session job specification
 * @param conf effective Flink configuration
 * @param savepoint savepoint path to restore from, or null to start without a savepoint
 * @return the ID of the submitted job
 * @throws Exception if the job submission fails
 */
JobID submitJobToSessionCluster(
ObjectMeta meta,
FlinkSessionJobSpec spec,
Configuration conf,
@Nullable String savepoint)
throws Exception;
/**
 * Checks whether the JobManager port is ready to accept connections.
 *
 * @param config effective Flink configuration
 * @return true if the JobManager port is reachable
 */
boolean isJobManagerPortReady(Configuration config);

/**
 * Lists the jobs known to the cluster described by the configuration.
 *
 * @param conf effective Flink configuration
 * @return collection of job status messages
 * @throws Exception if the job listing fails
 */
Collection<JobStatusMessage> listJobs(Configuration conf) throws Exception;

/**
 * Requests the result of the given job.
 *
 * @param conf effective Flink configuration
 * @param jobID ID of the job to query
 * @return the job result
 * @throws Exception if the result cannot be retrieved
 */
JobResult requestJobResult(Configuration conf, JobID jobID) throws Exception;
/**
 * Cancels the job of the given Flink deployment according to the requested upgrade mode.
 *
 * @param deployment the Flink deployment resource whose job should be cancelled
 * @param upgradeMode upgrade mode governing how the job is stopped (e.g. with/without savepoint
 *     — exact behavior per mode is implementation-defined)
 * @param conf effective Flink configuration
 * @throws Exception if the cancellation fails
 */
void cancelJob(FlinkDeployment deployment, UpgradeMode upgradeMode, Configuration conf)
throws Exception;

/**
 * Deletes the cluster deployment resources for the given resource metadata.
 *
 * @param meta Kubernetes object metadata of the deployment resource
 * @param status current deployment status (may be updated by implementations — confirm)
 * @param conf effective Flink configuration
 * @param deleteHaData whether HA metadata should also be removed
 */
void deleteClusterDeployment(
ObjectMeta meta,
FlinkDeploymentStatus status,
Configuration conf,
boolean deleteHaData);

/**
 * Cancels the given session job according to the requested upgrade mode.
 *
 * @param sessionJob the session job resource whose job should be cancelled
 * @param upgradeMode upgrade mode governing how the job is stopped
 * @param conf effective Flink configuration
 * @throws Exception if the cancellation fails
 */
void cancelSessionJob(FlinkSessionJob sessionJob, UpgradeMode upgradeMode, Configuration conf)
throws Exception;
/**
 * Triggers a savepoint for the given job.
 *
 * @param jobId ID of the job to take a savepoint of
 * @param triggerType origin of the trigger (e.g. manual vs. periodic)
 * @param savepointInfo savepoint bookkeeping info, presumably updated with the trigger id
 *     — TODO confirm against the implementation
 * @param conf effective Flink configuration
 * @throws Exception if triggering the savepoint fails
 */
void triggerSavepoint(
String jobId,
SnapshotTriggerType triggerType,
SavepointInfo savepointInfo,
Configuration conf)
throws Exception;

/**
 * Triggers a checkpoint for the given job.
 *
 * @param jobId ID of the job to take a checkpoint of
 * @param triggerType origin of the trigger (e.g. manual vs. periodic)
 * @param checkpointInfo checkpoint bookkeeping info, presumably updated with the trigger id
 *     — TODO confirm against the implementation
 * @param conf effective Flink configuration
 * @throws Exception if triggering the checkpoint fails
 */
void triggerCheckpoint(
String jobId,
SnapshotTriggerType triggerType,
CheckpointInfo checkpointInfo,
Configuration conf)
throws Exception;
/**
 * Returns the last completed checkpoint of the given job, if any.
 *
 * @param jobId ID of the job
 * @param conf effective Flink configuration
 * @return the last checkpoint wrapped as a Savepoint, or empty if none exists
 * @throws Exception if the checkpoint information cannot be retrieved
 */
Optional<Savepoint> getLastCheckpoint(JobID jobId, Configuration conf) throws Exception;

/**
 * Fetches the status of a previously triggered savepoint.
 *
 * @param triggerId trigger id returned when the savepoint was requested
 * @param jobId ID of the job
 * @param conf effective Flink configuration
 * @return the savepoint fetch result
 */
SavepointFetchResult fetchSavepointInfo(String triggerId, String jobId, Configuration conf);

/**
 * Fetches the status of a previously triggered checkpoint.
 *
 * @param triggerId trigger id returned when the checkpoint was requested
 * @param jobId ID of the job
 * @param conf effective Flink configuration
 * @return the checkpoint fetch result
 */
CheckpointFetchResult fetchCheckpointInfo(String triggerId, String jobId, Configuration conf);

/**
 * Returns information about the latest completed and pending checkpoints of the given job.
 *
 * @param jobId ID of the job
 * @param conf effective Flink configuration
 * @return tuple of (latest completed checkpoint info, pending checkpoint info), each optional
 * @throws Exception if the checkpoint history cannot be retrieved
 */
Tuple2<
Optional<CheckpointHistoryWrapper.CompletedCheckpointInfo>,
Optional<CheckpointHistoryWrapper.PendingCheckpointInfo>>
getCheckpointInfo(JobID jobId, Configuration conf) throws Exception;

/**
 * Disposes the savepoint at the given path.
 *
 * @param savepointPath path of the savepoint to dispose
 * @param conf effective Flink configuration
 * @throws Exception if the disposal fails
 */
void disposeSavepoint(String savepointPath, Configuration conf) throws Exception;
/**
 * Returns descriptive information about the cluster as key/value pairs.
 *
 * @param conf effective Flink configuration
 * @return map of cluster info entries (keys are implementation-defined — confirm with callers)
 * @throws Exception if the cluster info cannot be retrieved
 */
Map<String, String> getClusterInfo(Configuration conf) throws Exception;

/**
 * Returns the list of JobManager pods for the given deployment.
 *
 * @param deployment the Flink deployment resource
 * @param conf effective Flink configuration
 * @return list of JobManager pods
 */
PodList getJmPodList(FlinkDeployment deployment, Configuration conf);

/**
 * Blocks until the cluster described by the configuration has shut down.
 *
 * @param conf effective Flink configuration
 */
void waitForClusterShutdown(Configuration conf);
/**
 * Attempts an in-place scaling of the resource to the parallelism in the deploy configuration.
 *
 * @param resourceContext context of the Flink resource to scale
 * @param deployConfig configuration describing the target deployment
 * @return outcome of the scaling attempt, see {@link ScalingResult}
 * @throws Exception if the scaling operation fails
 */
ScalingResult scale(FlinkResourceContext<?> resourceContext, Configuration deployConfig)
throws Exception;

/**
 * Checks whether a previously triggered in-place scaling has completed.
 *
 * @param resourceContext context of the Flink resource being scaled
 * @return true if the scaling operation has completed
 */
boolean scalingCompleted(FlinkResourceContext<?> resourceContext);
/**
 * Retrieves the requested metrics for the given job.
 *
 * @param conf effective Flink configuration
 * @param jobId ID of the job to query
 * @param metricNames names of the metrics to fetch
 * @return map from metric name to metric value (as string)
 * @throws Exception if the metrics cannot be retrieved
 */
Map<String, String> getMetrics(Configuration conf, String jobId, List<String> metricNames)
throws Exception;

/**
 * Returns a REST cluster client for the cluster described by the configuration.
 * NOTE(review): caller responsibility for closing the client is not visible here — confirm.
 *
 * @param conf effective Flink configuration
 * @return REST cluster client
 * @throws Exception if the client cannot be created
 */
RestClusterClient<String> getClusterClient(Configuration conf) throws Exception;

/**
 * Returns detailed information about the given job.
 *
 * @param jobID ID of the job to query
 * @param conf effective Flink configuration
 * @return job details
 * @throws Exception if the job details cannot be retrieved
 */
JobDetailsInfo getJobDetailsInfo(JobID jobID, Configuration conf) throws Exception;
/** Result of an in-place scaling operation. */
enum ScalingResult {
    /** Scaling was triggered by the operation. */
    SCALING_TRIGGERED,
    /** The job was already scaled to the target previously; nothing to do. */
    ALREADY_SCALED,
    /** In-place scaling cannot be executed; a full upgrade is required instead. */
    CANNOT_SCALE
}