in phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java [5570:5785]
public MutationState alterIndex(AlterIndexStatement statement) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
String dataTableName;
long seqNum = 0L;
try {
dataTableName = statement.getTableName();
final String indexName = statement.getTable().getName().getTableName();
boolean isAsync = statement.isAsync();
boolean isRebuildAll = statement.isRebuildAll();
String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString();
PTable table = FromCompiler.getIndexResolver(statement, connection)
.getTables().get(0).getTable();
String schemaName = statement.getTable().getName().getSchemaName();
String tableName = table.getTableName().getString();
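// Gather any SET properties supplied with the statement and evaluate whether they imply a
// metadata transform or a change to a Phoenix table property.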
Map<String, List<Pair<String, Object>>> properties = new HashMap<>(statement.getProps().size());
MetaProperties metaProperties = loadStmtProperties(statement.getProps(), properties, table, false);
boolean isTransformNeeded = TransformClient.checkIsTransformNeeded(metaProperties, schemaName, table, indexName, dataTableName, tenantId, connection);
MetaPropertiesEvaluated metaPropertiesEvaluated = new MetaPropertiesEvaluated();
boolean changingPhoenixTableProperty = evaluateStmtProperties(metaProperties, metaPropertiesEvaluated, table, schemaName, tableName, new MutableBoolean(false));
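// ASYNC is only legal together with REBUILD; a REBUILD request is persisted as the BUILDING state.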
PIndexState newIndexState = statement.getIndexState();
if (isAsync && newIndexState != PIndexState.REBUILD) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.ASYNC_NOT_ALLOWED)
.setMessage(" ASYNC building of index is allowed only with REBUILD index state")
.setSchemaName(schemaName).setTableName(indexName).build().buildException();
}
if (newIndexState == PIndexState.REBUILD) {
newIndexState = PIndexState.BUILDING;
}
connection.setAutoCommit(false);
// Confirm index table is valid and up-to-date
TableRef indexRef = FromCompiler.getResolver(statement, connection).getTables().get(0);
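// Queue the SYSTEM.CATALOG upsert that records the new index state; the transition to ACTIVE
// uses a variant of the statement that resets one additional column to 0.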
PreparedStatement tableUpsert = null;
try {
if (newIndexState == PIndexState.ACTIVE){
tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE_TO_ACTIVE);
} else {
tableUpsert = connection.prepareStatement(UPDATE_INDEX_STATE);
}
tableUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
tableUpsert.setString(2, schemaName);
tableUpsert.setString(3, indexName);
tableUpsert.setString(4, newIndexState.getSerializedValue());
tableUpsert.setLong(5, 0);
if (newIndexState == PIndexState.ACTIVE){
tableUpsert.setLong(6, 0);
}
tableUpsert.execute();
} finally {
if (tableUpsert != null) {
tableUpsert.close();
}
}
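// Collect the pending metadata mutations and roll back so they are not committed directly
// through this connection.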
Long timeStamp = indexRef.getTable().isTransactional() ? indexRef.getTimeStamp() : null;
List<Mutation> tableMetadata = connection.getMutationState().toMutations(timeStamp).next().getSecond();
connection.rollback();
if (changingPhoenixTableProperty) {
seqNum = incrementTableSeqNum(table,statement.getTableType(), 0, metaPropertiesEvaluated);
tableMetadata.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
}
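// Apply the state change (plus any property changes) through the query services rather than
// committing the mutations locally.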
MetaDataMutationResult result = connection.getQueryServices().updateIndexState(tableMetadata, dataTableName, properties, table);
try {
MutationCode code = result.getMutationCode();
if (code == MutationCode.TABLE_NOT_FOUND) {
throw new TableNotFoundException(schemaName, indexName);
}
if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION)
.setMessage(" currentState=" + indexRef.getTable().getIndexState() + ". requestedState=" + newIndexState)
.setSchemaName(schemaName).setTableName(indexName).build().buildException();
}
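// Register the metadata transform if one is needed; transforms are rejected for indexes that
// carry a view index id (view/local indexes).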
if (isTransformNeeded) {
if (indexRef.getTable().getViewIndexId() != null) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_TRANSFORM_LOCAL_OR_VIEW_INDEX)
.setSchemaName(schemaName).setTableName(indexName).build().buildException();
}
try {
TransformClient.addTransform(connection, tenantId, table, metaProperties, seqNum, PTable.TransformType.METADATA_TRANSFORM);
} catch (SQLException ex) {
connection.rollback();
throw ex;
}
}
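// TABLE_ALREADY_EXISTS means the catalog row was updated; refresh the cached table and handle
// any async rebuild bookkeeping.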
if (code == MutationCode.TABLE_ALREADY_EXISTS) {
if (result.getTable() != null) { // To accommodate connection-less update of index state
addTableToCache(result, false);
// Set so that we get the table below with the potentially modified rowKeyOrderOptimizable flag set
indexRef.setTable(result.getTable());
if (newIndexState == PIndexState.BUILDING && isAsync) {
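// ASYNC rebuild request: for REBUILD ALL, queue an INDEX_REBUILD task in SYSTEM.TASK (unless one
// is already pending for this index); otherwise just stamp the index row with the rebuild request time.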
if (isRebuildAll) {
List<Task.TaskRecord> tasks = Task.queryTaskTable(connection, null, schemaName, tableName, PTable.TaskType.INDEX_REBUILD,
tenantId, indexName);
if (tasks == null || tasks.size() == 0) {
Timestamp ts = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
Map<String, Object> props = new HashMap<String, Object>() {{
put(INDEX_NAME, indexName);
put(REBUILD_ALL, true);
}};
try {
String json = JacksonUtil.getObjectWriter().writeValueAsString(props);
List<Mutation> sysTaskUpsertMutations = Task.getMutationsForAddTask(new SystemTaskParams.SystemTaskParamsBuilder()
.setConn(connection)
.setTaskType(
PTable.TaskType.INDEX_REBUILD)
.setTenantId(tenantId)
.setSchemaName(schemaName)
.setTableName(dataTableName)
.setTaskStatus(
PTable.TaskStatus.CREATED.toString())
.setData(json)
.setPriority(null)
.setStartTs(ts)
.setEndTs(null)
.setAccessCheckEnabled(true)
.build());
byte[] rowKey = sysTaskUpsertMutations
.get(0).getRow();
MetaDataMutationResult metaDataMutationResult =
Task.taskMetaDataCoprocessorExec(connection, rowKey,
new TaskMetaDataServiceCallBack(sysTaskUpsertMutations));
if (MutationCode.UNABLE_TO_UPSERT_TASK.equals(
metaDataMutationResult.getMutationCode())) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNABLE_TO_UPSERT_TASK)
.setSchemaName(SYSTEM_SCHEMA_NAME)
.setTableName(SYSTEM_TASK_TABLE).build().buildException();
}
} catch (IOException e) {
throw new SQLException("Exception happened while adding a System.Task: " + e.toString(), e);
}
}
} else {
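// Not a REBUILD ALL: record the timestamp at which the async rebuild was requested and commit
// it right away.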
try {
tableUpsert = connection.prepareStatement(UPDATE_INDEX_REBUILD_ASYNC_STATE);
tableUpsert.setString(1, connection.getTenantId() == null ?
null :
connection.getTenantId().getString());
tableUpsert.setString(2, schemaName);
tableUpsert.setString(3, indexName);
long beginTimestamp = result.getTable().getTimeStamp();
tableUpsert.setLong(4, beginTimestamp);
tableUpsert.execute();
connection.commit();
} finally {
if (tableUpsert != null) {
tableUpsert.close();
}
}
}
}
}
}
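// Synchronous (non-ASYNC) transition to BUILDING: wipe the existing index rows, then rebuild
// the index inline.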
if (newIndexState == PIndexState.BUILDING && !isAsync) {
PTable index = indexRef.getTable();
// First delete any existing rows of the index
if (IndexUtil.isGlobalIndex(index) && index.getViewIndexId() == null) {
// for a global index of a normal base table, it's safe to just truncate and
// rebuild. We preserve splits to reduce the amount of splitting we need to do
// during rebuild
org.apache.hadoop.hbase.TableName physicalTableName =
org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes());
try (Admin admin = connection.getQueryServices().getAdmin()) {
admin.disableTable(physicalTableName);
admin.truncateTable(physicalTableName, true);
// truncateTable automatically re-enables the table when it's done
} catch (IOException ie) {
String failedTable = physicalTableName.getNameAsString();
throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE).
setMessage("Error when truncating index table [" + failedTable +
"] before rebuilding: " + ie.getMessage()).
setTableName(failedTable).build().buildException();
}
} else {
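// View/local indexes share a physical table and cannot be truncated; delete this index's rows
// through a PostDDL mutation plan instead.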
Long scn = connection.getSCN();
long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
MutationPlan plan = new PostDDLCompiler(connection)
.compile(Collections.singletonList(indexRef), null,
null, Collections.<PColumn>emptyList(), ts);
connection.getQueryServices().updateData(plan);
}
NamedTableNode dataTableNode = NamedTableNode.create(null,
TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
// Next rebuild the index
connection.setAutoCommit(true);
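// With an SCN set, run the build as of that timestamp; otherwise build against the current
// state of the data table.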
if (connection.getSCN() != null) {
return buildIndexAtTimeStamp(index, dataTableNode);
}
TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
return buildIndex(index, dataTableRef);
}
return new MutationState(1, 1000, connection);
} catch (Throwable e) {
TableMetricsManager.updateMetricsForSystemCatalogTableMethod(dataTableName, NUM_METADATA_LOOKUP_FAILURES, 1);
throw e;
}
} catch (TableNotFoundException e) {
if (!statement.ifExists()) {
throw e;
}
return new MutationState(0, 0, connection);
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}