in metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/DirectSqlSavePartition.java [124:188]
    private void _insert(final QualifiedName tableQName, final Table table, final TableSequenceIds tableSequenceIds,
                         final PartitionSequenceIds partitionSequenceIds, final List<PartitionInfo> partitions,
                         final long currentTimeInEpoch, final int index) {
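        // Accumulate the rows for each metastore table up front so the whole chunk of
        // partitions can be written with a handful of JDBC batch inserts.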
        final List<Object[]> serdesValues = Lists.newArrayList();
        final List<Object[]> serdeParamsValues = Lists.newArrayList();
        final List<Object[]> sdsValues = Lists.newArrayList();
        final List<Object[]> partitionsValues = Lists.newArrayList();
        final List<Object[]> partitionParamsValues = Lists.newArrayList();
        final List<Object[]> partitionKeyValsValues = Lists.newArrayList();
        final List<String> partitionNames = Lists.newArrayList();
        int currentIndex = index;
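        // Each partition takes the next offset from the pre-fetched sequence ids for
        // its PARTITIONS, SDS and SERDES rows.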
        for (final PartitionInfo partition : partitions) {
            final StorageInfo storageInfo = partition.getSerde();
            final long partId = partitionSequenceIds.getPartId() + currentIndex;
            final long sdsId = partitionSequenceIds.getSdsId() + currentIndex;
            final long serdeId = partitionSequenceIds.getSerdeId() + currentIndex;
            final String partitionName = partition.getName().getPartitionName();
            final List<String> partValues = PartitionUtil.getPartValuesFromPartName(tableQName, table, partitionName);
            final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
            partitionsValues.add(new Object[]{0, tableSequenceIds.getTableId(), currentTimeInEpoch,
                sdsId, escapedPartName, partId, });
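            // One PARTITION_KEY_VALS row per partition column value, keyed by its
            // ordinal position within the partition name.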
            for (int i = 0; i < partValues.size(); i++) {
                partitionKeyValsValues.add(new Object[]{partId, partValues.get(i), i});
            }
            // Partition parameters
            final Map<String, String> parameters = partition.getMetadata();
            if (parameters != null) {
                parameters
                    .forEach((key, value) -> partitionParamsValues.add(new Object[]{value, partId, key}));
            }
            partitionParamsValues.add(new Object[]{currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME});
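            // Storage info, when present, becomes one SERDES row plus its SERDE_PARAMS,
            // and one SDS (storage descriptor) row that reuses the table's column
            // descriptor id (CD_ID).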
            if (storageInfo != null) {
                serdesValues.add(new Object[]{null, storageInfo.getSerializationLib(), serdeId});
                final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
                if (serdeInfoParameters != null) {
                    serdeInfoParameters
                        .forEach((key, value) -> serdeParamsValues.add(new Object[]{value, serdeId, key}));
                }
                sdsValues.add(new Object[]{storageInfo.getOutputFormat(), false, tableSequenceIds.getCdId(),
                    false, serdeId, storageInfo.getUri(), storageInfo.getInputFormat(), 0, sdsId, });
            }
            partitionNames.add(partitionName);
            currentIndex++;
        }
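        // Insert parent rows before the rows that reference them
        // (SERDES -> SDS -> PARTITIONS, then params and key vals) so the
        // metastore's foreign-key references resolve.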
        try {
            jdbcTemplate.batchUpdate(SQL.SERDES_INSERT, serdesValues,
                new int[]{Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
            jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT, serdeParamsValues,
                new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
            jdbcTemplate.batchUpdate(SQL.SDS_INSERT, sdsValues,
                new int[]{Types.VARCHAR, Types.BOOLEAN, Types.BIGINT, Types.BOOLEAN,
                    Types.BIGINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.BIGINT, });
            jdbcTemplate.batchUpdate(SQL.PARTITIONS_INSERT, partitionsValues,
                new int[]{Types.INTEGER, Types.BIGINT, Types.INTEGER, Types.BIGINT, Types.VARCHAR, Types.BIGINT});
            jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT, partitionParamsValues,
                new int[]{Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
            jdbcTemplate.batchUpdate(SQL.PARTITION_KEY_VALS_INSERT, partitionKeyValsValues,
                new int[]{Types.BIGINT, Types.VARCHAR, Types.INTEGER});
        } catch (final DuplicateKeyException e) {
            throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
        } catch (final Exception e) {
            throw new ConnectorException(
                String.format("Failed inserting partitions %s for table %s", partitionNames, tableQName), e);
        }
    }