in phoenix-core-client/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java [417:594]
/**
 * Attempts to build a {@link QueryPlan} that uses the given {@code index} instead of the data
 * table, returning {@code null} when the index cannot be used (inactive state, or a
 * non-coercible RVC offset).
 *
 * Control-flow note: this method intentionally uses {@link ColumnNotFoundException} as a goto —
 * it is thrown both when server-side merge for uncovered indexes is disabled and when the index
 * plan does not project all required columns; the catch block below then either gives up on the
 * index or (for hinted plans) rewrites the query as an IN-subquery joining back to the data
 * table through its PK.
 *
 * @param statement the statement being compiled; supplies the connection and config
 * @param select the original select, used for hint inspection
 * @param index the candidate index table
 * @param targetColumns expected output columns used to compile the index plan
 * @param parallelIteratorFactory iterator factory passed through to the compiler
 * @param dataPlan the already-compiled data-table plan this index plan must match
 * @param isHinted whether this index was explicitly requested via a hint
 * @param indexSelect the select statement rewritten against the index table
 * @param resolver column resolver whose first table is the index
 * @return a usable index plan, or {@code null} if this index cannot serve the query
 * @throws SQLException on compilation failure other than the handled cases
 */
private QueryPlan addPlan(PhoenixStatement statement, SelectStatement select, PTable index,
List<? extends PDatum> targetColumns,
ParallelIteratorFactory parallelIteratorFactory, QueryPlan dataPlan,
boolean isHinted, SelectStatement indexSelect,
ColumnResolver resolver) throws SQLException {
// The index plan must project exactly as many columns as the data plan; checked below.
int nColumns = dataPlan.getProjector().getColumnCount();
// We will or will not do tuple projection according to the data plan.
boolean isProjected = dataPlan.getContext().getResolver().getTables().get(0).getTable().getType() == PTableType.PROJECTED;
// Check index state of now potentially updated index table to make sure it's active
TableRef indexTableRef = resolver.getTables().get(0);
indexTableRef.setHinted(isHinted);
Map<TableRef, QueryPlan> dataPlans = Collections.singletonMap(indexTableRef, dataPlan);
PTable indexTable = indexTableRef.getTable();
PIndexState indexState = indexTable.getIndexState();
boolean isServerMergeForUncoveredIndexEnabled = statement.getConnection()
.getQueryServices().getProps().getBoolean(
QueryServices.SERVER_MERGE_FOR_UNCOVERED_INDEX,
QueryServicesOptions.DEFAULT_SERVER_MERGE_FOR_UNCOVERED_INDEX);
// Only use the index if it is ACTIVE, PENDING_ACTIVE, or PENDING_DISABLE within the
// configured threshold window (it may still become active again).
if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE
|| (indexState == PIndexState.PENDING_DISABLE && isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(), indexTable.getIndexDisableTimestamp()))) {
try {
// Server merge disabled (by config or by NO_INDEX_SERVER_MERGE hint): throw
// ColumnNotFoundException so the catch block below handles this index as
// "not covering" — deliberate exception-as-control-flow.
if (!isServerMergeForUncoveredIndexEnabled
|| select.getHint().hasHint(HintNode.Hint.NO_INDEX_SERVER_MERGE)) {
String schemaNameStr = index.getSchemaName() == null ? null
: index.getSchemaName().getString();
String tableNameStr = index.getTableName() == null ? null
: index.getTableName().getString();
throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
}
// translate nodes that match expressions that are indexed to the
// associated column parse node
SelectStatement rewrittenIndexSelect = ParseNodeRewriter.rewrite(indexSelect, new IndexExpressionParseNodeRewriter(index, null, statement.getConnection(), indexSelect.getUdfParseNodes()));
QueryCompiler compiler = new QueryCompiler(statement, rewrittenIndexSelect, resolver, targetColumns, parallelIteratorFactory, dataPlan.getContext().getSequenceManager(), isProjected, true, dataPlans);
QueryPlan plan = compiler.compile();
if (indexTable.getIndexType() == IndexType.UNCOVERED_GLOBAL) {
// Indexed columns should also be added to the data columns to join for
// uncovered global indexes. This is required to verify index rows against
// data table rows
plan.getContext().setUncoveredIndex(true);
PhoenixConnection connection = statement.getConnection();
IndexMaintainer maintainer;
PTable newIndexTable;
String dataTableName;
if (indexTable.getViewIndexId() != null
&& indexTable.getName().getString().contains(
QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) {
// MetaDataClient modifies the index table name for view indexes if the
// parent view of an index has a child view. We need to recreate a PTable
// object with the correct table name to get the index maintainer
int lastIndexOf = indexTable.getName().getString().lastIndexOf(
QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR);
String indexName = indexTable.getName().getString().substring(lastIndexOf + 1);
newIndexTable = connection.getTable(indexName);
// NOTE(review): schema comes from the re-fetched newIndexTable but the table
// name from the original indexTable — looks intentional for child-view
// indexes, but worth confirming they can never disagree.
dataTableName = SchemaUtil.getTableName(
newIndexTable.getParentSchemaName().getString(),
indexTable.getParentTableName().getString());
} else {
newIndexTable = indexTable;
dataTableName = SchemaUtil.getTableName(
indexTable.getParentSchemaName().getString(),
indexTable.getParentTableName().getString());
}
// For CDC queries the data plan's table is the CDC table; the real data table
// is the CDC table's parent, so recompute the name accordingly.
PTable dataTableFromDataPlan = dataPlan.getTableRef().getTable();
PTable cdcTable = null;
if (dataTableFromDataPlan.getType() == PTableType.CDC) {
cdcTable = dataTableFromDataPlan;
dataTableName = SchemaUtil.getTableName(
indexTable.getParentSchemaName().getString(),
dataTableFromDataPlan.getParentTableName().getString());
}
PTable dataTable = connection.getTable(dataTableName);
maintainer = newIndexTable.getIndexMaintainer(dataTable, cdcTable, connection);
Set<org.apache.hadoop.hbase.util.Pair<String, String>> indexedColumns =
maintainer.getIndexedColumnInfo();
for (org.apache.hadoop.hbase.util.Pair<String, String> pair : indexedColumns) {
// The first member of the pair is the column family. For the data table PK columns, the column
// family is set to null. The data PK columns should not be added to the set of data columns
// to join back to index rows
if (pair.getFirst() != null) {
PColumn pColumn = dataTable.getColumnForColumnName(pair.getSecond());
// The following adds the column to the set
plan.getContext().getDataColumnPosition(pColumn);
}
}
// CDC queries additionally need the CDC JSON column joined back from the data table.
if (dataTableFromDataPlan.getType() == PTableType.CDC) {
PColumn cdcJsonCol = dataTableFromDataPlan.getColumnForColumnName(
CDC_JSON_COL_NAME);
plan.getContext().getDataColumnPosition(cdcJsonCol);
}
}
// Re-read the index state from the compiled plan's table ref: compilation may have
// refreshed metadata, so the state could differ from the one checked above.
indexTableRef = plan.getTableRef();
indexTable = indexTableRef.getTable();
indexState = indexTable.getIndexState();
// Checking number of columns handles the wildcard cases correctly, as in that case the index
// must contain all columns from the data table to be able to be used.
if (indexState == PIndexState.ACTIVE || indexState == PIndexState.PENDING_ACTIVE
|| (indexState == PIndexState.PENDING_DISABLE
&& isUnderPendingDisableThreshold(indexTableRef.getCurrentTime(),
indexTable.getIndexDisableTimestamp()))) {
if (plan.getProjector().getColumnCount() == nColumns) {
return plan;
} else {
// Projection mismatch: treat like a missing column so the catch block
// below decides whether to rewrite or skip this index.
String schemaNameStr = index.getSchemaName() == null ? null
: index.getSchemaName().getString();
String tableNameStr = index.getTableName() == null ? null
: index.getTableName().getString();
throw new ColumnNotFoundException(schemaNameStr, tableNameStr, null, "*");
}
}
} catch (ColumnNotFoundException e) {
/* Means that a column is being used that's not in our index.
* Since we currently don't keep stats, we don't know the selectivity of the index.
* For now, if this is a hinted plan, we will try rewriting the query as a subquery;
* otherwise we just don't use this index (as opposed to trying to join back from
* the index table to the data table.
*/
// Reset the state changes from the attempt above
indexTableRef.setHinted(false);
dataPlan.getContext().setUncoveredIndex(false);
SelectStatement dataSelect = (SelectStatement)dataPlan.getStatement();
ParseNode where = dataSelect.getWhere();
if (isHinted && where != null) {
StatementContext context = new StatementContext(statement, resolver);
// Split the WHERE into a part evaluable against the index (goes into the inner
// select) and an extracted remainder (re-applied on the outer query).
WhereConditionRewriter whereRewriter = new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
where = where.accept(whereRewriter);
if (where != null) {
PTable dataTable = dataPlan.getTableRef().getTable();
List<PColumn> pkColumns = dataTable.getPKColumns();
List<AliasedNode> aliasedNodes = Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
List<ParseNode> nodes = Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
boolean isSalted = dataTable.getBucketNum() != null;
boolean isTenantSpecific = dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
// Skip the salt byte and tenant-id PK columns — they are implicit and must not
// appear in the user-visible subquery projection.
int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
for (int i = posOffset; i < pkColumns.size(); i++) {
PColumn column = pkColumns.get(i);
String indexColName = IndexUtil.getIndexColumnName(column);
ParseNode indexColNode = new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
PDataType dataColType = column.getDataType();
// Index columns may store a different representation (e.g. DESC/variable
// length); cast back to the data-table type so the IN comparison matches.
if (indexColType != dataColType) {
indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
}
aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
}
// Rewrite as: SELECT ... FROM dataTable WHERE (pk...) IN (SELECT pk... FROM index WHERE <index-evaluable part>)
SelectStatement innerSelect = FACTORY.select(indexSelect.getFrom(), indexSelect.getHint(), false, aliasedNodes, where, null, null, null, null, null, indexSelect.getBindCount(), false, indexSelect.hasSequence(), Collections.<SelectStatement>emptyList(), indexSelect.getUdfParseNodes());
ParseNode outerWhere = FACTORY.in(nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes), FACTORY.subquery(innerSelect, false), false, true);
ParseNode extractedCondition = whereRewriter.getExtractedCondition();
if (extractedCondition != null) {
outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
}
// Force NO_INDEX on the outer query so the optimizer does not recurse into
// trying this (or another) index for the data-table scan again.
HintNode hint = HintNode.combine(HintNode.subtract(indexSelect.getHint(), new Hint[] {Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION}), FACTORY.hint("NO_INDEX"));
SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
RewriteResult rewriteResult =
ParseNodeUtil.rewrite(query, statement.getConnection());
QueryPlan plan = new QueryCompiler(
statement,
rewriteResult.getRewrittenSelectStatement(),
rewriteResult.getColumnResolver(),
targetColumns,
parallelIteratorFactory,
dataPlan.getContext().getSequenceManager(),
isProjected,
true,
dataPlans).compile();
return plan;
}
}
}
catch (RowValueConstructorOffsetNotCoercibleException e) {
// Could not coerce the user provided RVC Offset so we do not have a plan to add.
return null;
}
}
// Index not in a usable state, or no subquery rewrite was possible: no plan from this index.
return null;
}