in fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java [497:621]
public static String getCreateTableSql(String dbName, String tableName,
String tableComment, List<String> columnsSql, List<String> partitionColumnsSql,
boolean isPrimaryKeyUnique, List<String> primaryKeysSql,
List<String> foreignKeysSql, String kuduPartitionByParams, Pair<List<String>,
TSortingOrder> sortProperties, Map<String, String> tblProperties,
Map<String, String> serdeParameters, boolean isExternal, boolean ifNotExists,
RowFormat rowFormat, HdfsFileFormat fileFormat, HdfsCompression compression,
String storageHandlerClass, HdfsUri location, String icebergPartitions,
TBucketInfo bucketInfo) {
Preconditions.checkNotNull(tableName);
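// Assemble the CREATE TABLE statement clause by clause; each conditional below
// appends one optional clause to the builder.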
StringBuilder sb = new StringBuilder("CREATE ");
if (isExternal) sb.append("EXTERNAL ");
sb.append("TABLE ");
if (ifNotExists) sb.append("IF NOT EXISTS ");
if (dbName != null) sb.append(dbName + ".");
sb.append(tableName);
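// With an explicit column list, PRIMARY KEY and FOREIGN KEY constraints are folded
// into the same parenthesized block as the column definitions; without one (CTAS),
// only the Kudu primary key is printed.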
if (columnsSql != null && !columnsSql.isEmpty()) {
sb.append(" (\n ");
sb.append(Joiner.on(",\n ").join(columnsSql));
if (CollectionUtils.isNotEmpty(primaryKeysSql)) {
sb.append(",\n ");
sb.append(KuduUtil.getPrimaryKeyString(isPrimaryKeyUnique)).append(" (");
Joiner.on(", ").appendTo(sb, primaryKeysSql).append(")");
}
if (CollectionUtils.isNotEmpty(foreignKeysSql)) {
sb.append(",\n FOREIGN KEY");
Joiner.on(",\n FOREIGN KEY").appendTo(sb, foreignKeysSql).append("\n");
}
sb.append("\n)");
} else {
// CTAS for Kudu tables still prints the primary key clause.
if (primaryKeysSql != null && !primaryKeysSql.isEmpty()) {
sb.append("\n ");
sb.append(KuduUtil.getPrimaryKeyString(isPrimaryKeyUnique)).append(" (");
Joiner.on(", ").appendTo(sb, primaryKeysSql).append(")");
}
}
sb.append("\n");
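// HDFS-style tables list their partition columns in a PARTITIONED BY clause;
// Kudu tables emit a PARTITION BY expression instead.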
if (partitionColumnsSql != null && !partitionColumnsSql.isEmpty()) {
sb.append(String.format("PARTITIONED BY (\n %s\n)\n",
Joiner.on(", \n ").join(partitionColumnsSql)));
}
if (kuduPartitionByParams != null && !kuduPartitionByParams.isEmpty()) {
sb.append("PARTITION BY " + kuduPartitionByParams + "\n");
}
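// Bucketed tables nest SORT BY between CLUSTERED BY and INTO ... BUCKETS;
// otherwise a standalone SORT BY clause is emitted when sort columns exist.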
if (bucketInfo != null && bucketInfo.getBucket_type() != TBucketType.NONE) {
sb.append(String.format("CLUSTERED BY (\n %s\n)\n",
Joiner.on(", \n ").join(bucketInfo.getBucket_columns())));
if (sortProperties.first != null) {
sb.append(String.format("SORT BY %s (\n %s\n)\n",
sortProperties.second.toString(),
Joiner.on(", \n ").join(sortProperties.first)));
}
sb.append(String.format("INTO %s BUCKETS\n", bucketInfo.getNum_bucket()));
} else if (sortProperties.first != null) {
sb.append(String.format("SORT BY %s (\n %s\n)\n", sortProperties.second.toString(),
Joiner.on(", \n ").join(sortProperties.first)));
}
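// Iceberg partition transforms are printed as a PARTITIONED BY SPEC clause.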
if (icebergPartitions != null && !icebergPartitions.isEmpty()) {
sb.append("PARTITIONED BY SPEC\n");
sb.append(icebergPartitions);
sb.append("\n");
}
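// Optional table COMMENT, then the delimited row format. Delimiters are
// Java-escaped so that control characters survive the round trip through DDL.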
if (tableComment != null) sb.append(" COMMENT '" + tableComment + "'\n");
if (rowFormat != null && !rowFormat.isDefault()) {
sb.append("ROW FORMAT DELIMITED");
if (rowFormat.getFieldDelimiter() != null) {
String fieldDelim = StringEscapeUtils.escapeJava(rowFormat.getFieldDelimiter());
sb.append(" FIELDS TERMINATED BY '" + fieldDelim + "'");
}
if (rowFormat.getEscapeChar() != null) {
String escapeChar = StringEscapeUtils.escapeJava(rowFormat.getEscapeChar());
sb.append(" ESCAPED BY '" + escapeChar + "'");
}
if (rowFormat.getLineDelimiter() != null) {
String lineDelim = StringEscapeUtils.escapeJava(rowFormat.getLineDelimiter());
sb.append(" LINES TERMINATED BY '" + lineDelim + "'");
}
sb.append("\n");
}
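// Tables backed by a storage handler (e.g. HBase) use STORED BY; tables using a
// native file format use STORED AS.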
if (storageHandlerClass == null) {
// We must handle LZO_TEXT specially because Impala does not support creating
// tables with this file format. In that case we cannot output "WITH
// SERDEPROPERTIES", since Hive does not support it together with "STORED AS".
// For any other HdfsFileFormat we output the serde properties, because Impala
// supports them.
if (compression != HdfsCompression.LZO &&
compression != HdfsCompression.LZO_INDEX &&
serdeParameters != null && !serdeParameters.isEmpty()) {
sb.append(
"WITH SERDEPROPERTIES " + propertyMapToSql(serdeParameters) + "\n");
}
if (fileFormat != null) {
sb.append("STORED AS " + fileFormat.toSql(compression) + "\n");
}
} else {
// If a storage handler class is set, generate Hive-compatible DDL, since
// Impala does not yet support creating HBase tables itself.
sb.append("STORED BY '" + storageHandlerClass + "'\n");
if (serdeParameters != null && !serdeParameters.isEmpty()) {
sb.append(
"WITH SERDEPROPERTIES " + propertyMapToSql(serdeParameters) + "\n");
}
}
// Iceberg tables in a 'hadoop.catalog' catalog do not display the table LOCATION
// in 'SHOW CREATE TABLE'; use 'DESCRIBE FORMATTED/EXTENDED' to see the location.
// Guard against a null property map, matching the tblProperties null check below.
TIcebergCatalog icebergCatalog = IcebergUtil.getTIcebergCatalog(
tblProperties == null ? null : tblProperties.get(IcebergTable.ICEBERG_CATALOG));
boolean isHadoopCatalog = fileFormat == HdfsFileFormat.ICEBERG &&
icebergCatalog == TIcebergCatalog.HADOOP_CATALOG;
if (location != null && !isHadoopCatalog) {
sb.append("LOCATION '" + location.toString() + "'\n");
}
if (tblProperties != null && !tblProperties.isEmpty()) {
sb.append("TBLPROPERTIES " + propertyMapToSql(tblProperties));
}
return sb.toString();
}
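// Illustrative trace (not from the source; the argument values below are
// hypothetical). A minimal call such as
//
//   getCreateTableSql("functional", "t", null, ImmutableList.of("i INT"), null,
//       true, null, null, null, new Pair<>(null, null), ImmutableMap.of(), null,
//       false, false, null, HdfsFileFormat.TEXT, HdfsCompression.NONE, null,
//       null, null, null)
//
// would return (whitespace approximate):
//
//   CREATE TABLE functional.t (
//     i INT
//   )
//   STORED AS TEXTFILE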