in spark-load/spark-load-core/src/main/java/org/apache/doris/config/JobConfig.java [165:202]
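/**
 * Validates the configured load tasks before the Spark load job is submitted.
 * Every target table must not have merge-on-write enabled, and each task must
 * carry the settings required by its source type (HIVE or FILE); any violation
 * is reported as an IllegalArgumentException.
 */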
public void checkTaskInfo() {
Map<String, TaskInfo> tasks = getLoadTasks();
Preconditions.checkArgument(!tasks.isEmpty(), "loadTasks is empty");
for (Map.Entry<String, TaskInfo> entry : tasks.entrySet()) {
String table = entry.getKey();
try {
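// Fetch the table's DDL from FE and reject unique-key tables with
// merge-on-write enabled, which Spark load does not support.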
DorisClient.FeClient feClient = DorisClient.getFeClient(feAddresses, user, password);
String ddl = feClient.getDDL(database, table);
if (StringUtils.isNoneBlank(ddl) && ddl.contains("\"enable_unique_key_merge_on_write\" = \"true\"")) {
throw new IllegalArgumentException("Merge On Write is not supported");
}
} catch (SparkLoadException e) {
throw new IllegalArgumentException("check table failed", e);
}
TaskInfo taskInfo = entry.getValue();
switch (taskInfo.getType()) {
case HIVE:
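// Hive source: the source Hive database and table must both be specified.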
Preconditions.checkArgument(StringUtils.isNoneBlank(taskInfo.getHiveDatabase()),
"hive database is empty");
Preconditions.checkArgument(StringUtils.isNoneBlank(taskInfo.getHiveTable()),
"hive table is empty");
break;
case FILE:
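// File source: at least one input path and a supported file format are required.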
Preconditions.checkArgument(taskInfo.getPaths() != null && !taskInfo.getPaths().isEmpty(),
"file path is empty");
Preconditions.checkArgument(
StringUtils.equalsAnyIgnoreCase(taskInfo.getFormat(), "parquet", "orc", "csv"),
"format only support parquet or orc or csv");
if ("csv".equalsIgnoreCase(taskInfo.getFormat())) {
Preconditions.checkArgument(StringUtils.isNoneEmpty(taskInfo.getFieldSep()),
"field separator is empty");
}
break;
default:
throw new IllegalArgumentException("task type only supports hive or file");
}
}
}