in phoenix-core-client/src/main/java/org/apache/phoenix/util/ScanUtil.java [455:618]
/**
 * Appends the lower or upper bound of each slot's current key range onto {@code key},
 * starting at {@code byteOffset}, to form one edge of a scan boundary. Handles variable-width
 * separator bytes (single byte for most types, two bytes for {@code PVarbinaryEncoded}),
 * DESC-ordered columns, and inclusive/exclusive bound adjustments via key increment.
 *
 * @param schema           row key schema describing the PK columns
 * @param slots            per-slot lists of key ranges; {@code position[i]} selects the
 *                         range used for slot {@code i}
 * @param slotSpan         number of extra schema columns (beyond one) each slot spans
 * @param position         current range index per slot
 * @param bound            which edge to build: {@code Bound.LOWER} or {@code Bound.UPPER}
 * @param key              destination buffer; assumed large enough for the bytes written
 * @param byteOffset       offset into {@code key} at which to start writing
 * @param slotStartIndex   first slot (inclusive) to process
 * @param slotEndIndex     last slot (exclusive) to process
 * @param schemaStartIndex lower limit used when trimming trailing separators for LOWER bounds
 * @return number of bytes written past {@code byteOffset}, or a negative {@code byteOffset}
 *         if an exclusive/inclusive adjustment overflowed the key (meaning no end key should
 *         be used for this part)
 */
public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[] slotSpan, int[] position,
        Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex, int schemaStartIndex) {
    int offset = byteOffset;
    boolean lastInclusiveUpperSingleKey = false;
    boolean anyInclusiveUpperRangeKey = false;
    boolean lastUnboundUpper = false;
    // The index used for slots should be incremented by 1,
    // but the index for the field it represents in the schema
    // should be incremented by 1 + value in the current slotSpan index
    // slotSpan stores the number of columns beyond one that the range spans
    Field field = null;
    int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex);
    for (i = slotStartIndex; i < slotEndIndex; i++) {
        // Build up the key by appending the bound of each key range
        // from the current position of each slot.
        KeyRange range = slots.get(i).get(position[i]);
        // Use last slot in a multi-span column to determine if fixed width
        field = schema.getField(fieldIndex + slotSpan[i]);
        boolean isFixedWidth = field.getDataType().isFixedWidth();
        /*
         * If the current slot is unbound then stop if:
         * 1) setting the upper bound. There's no value in
         * continuing because nothing will be filtered.
         * 2) setting the lower bound when the type is fixed length
         * for the same reason. However, if the type is variable width
         * continue building the key because null values will be filtered
         * since our separator byte will be appended and incremented.
         */
        lastUnboundUpper = false;
        if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth)) {
            lastUnboundUpper = (bound == Bound.UPPER);
            break;
        }
        // Copy this slot's bound bytes into the key buffer.
        byte[] bytes = range.getRange(bound);
        System.arraycopy(bytes, 0, key, offset, bytes.length);
        offset += bytes.length;
        /*
         * We must add a terminator to a variable length key even for the last PK column if
         * the lower key is non inclusive or the upper key is inclusive. Otherwise, we'd be
         * incrementing the key value itself, and thus bumping it up too much.
         */
        boolean inclusiveUpper = range.isUpperInclusive() && bound == Bound.UPPER;
        boolean exclusiveLower = !range.isLowerInclusive() && bound == Bound.LOWER && range != KeyRange.EVERYTHING_RANGE;
        boolean exclusiveUpper = !range.isUpperInclusive() && bound == Bound.UPPER;
        // If we are setting the upper bound of using inclusive single key, we remember
        // to increment the key if we exit the loop after this iteration.
        //
        // We remember to increment the last slot if we are setting the upper bound with an
        // inclusive range key.
        //
        // We cannot combine the two flags together in case for single-inclusive key followed
        // by the range-exclusive key. In that case, we do not need to increment the end at the
        // end. But if we combine the two flag, the single inclusive key in the middle of the
        // key slots would cause the flag to become true.
        lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper;
        anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
        if (field.getDataType() != PVarbinaryEncoded.INSTANCE) {
            // Single-byte separator path for all types except PVarbinaryEncoded.
            // A null or empty byte array is always represented as a zero byte
            byte sepByte =
                SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0,
                    field);
            if (!isFixedWidth && (sepByte == QueryConstants.DESC_SEPARATOR_BYTE || (
                !exclusiveUpper && (fieldIndex < schema.getMaxFields() || inclusiveUpper
                    || exclusiveLower)))) {
                key[offset++] = sepByte;
                // Set lastInclusiveUpperSingleKey back to false if this is the last pk column
                // as we don't want to increment the QueryConstants.SEPARATOR_BYTE byte in this case.
                // To test if this is the last pk column we need to consider the span of this slot
                // and the field index to see if this slot considers the last column.
                // But if last field of rowKey is variable length and also DESC, the trailing 0xFF
                // is not removed when stored in HBASE, so for such case, we should not set
                // lastInclusiveUpperSingleKey back to false.
                if (sepByte != QueryConstants.DESC_SEPARATOR_BYTE) {
                    lastInclusiveUpperSingleKey &=
                        (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1;
                }
            }
        } else {
            // Two-byte separator path for PVarbinaryEncoded columns; same structure as above.
            byte[] sepBytes =
                SchemaUtil.getSeparatorBytesForVarBinaryEncoded(schema.rowKeyOrderOptimizable(),
                    bytes.length == 0, field.getSortOrder());
            // NOTE(review): '==' here is deliberate reference equality — presumably
            // getSeparatorBytesForVarBinaryEncoded returns the shared constant array for the
            // DESC case; verify against SchemaUtil before changing.
            if (!isFixedWidth && (
                sepBytes == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES || (
                    !exclusiveUpper && (fieldIndex < schema.getMaxFields() || inclusiveUpper
                        || exclusiveLower)))) {
                key[offset++] = sepBytes[0];
                key[offset++] = sepBytes[1];
                // Same "last pk column" adjustment as the single-byte case above.
                if (sepBytes != QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) {
                    lastInclusiveUpperSingleKey &=
                        (fieldIndex + slotSpan[i]) < schema.getMaxFields() - 1;
                }
            }
        }
        if (exclusiveUpper) {
            // Cannot include anything else on the key, as otherwise
            // keys that match the upper range will be included. For example WHERE k1 < 2 and k2 = 3
            // would match k1 = 2, k2 = 3 which is wrong.
            break;
        }
        // If we are setting the lower bound with an exclusive range key, we need to bump the
        // slot up for each key part. For an upper bound, we bump up an inclusive key, but
        // only after the last key part.
        if (exclusiveLower) {
            if (!ByteUtil.nextKey(key, offset)) {
                // Special case for not being able to increment.
                // In this case we return a negative byteOffset to
                // remove this part from the key being formed. Since the
                // key has overflowed, this means that we should not
                // have an end key specified.
                return -byteOffset;
            }
            // We're filtering on values being non null here, but we still need the 0xFF
            // terminator, since DESC keys ignore the last byte as it's expected to be
            // the terminator. Without this, we'd ignore the separator byte that was
            // just added and incremented.
            if (field.getDataType() != PVarbinaryEncoded.INSTANCE) {
                if (!isFixedWidth && bytes.length == 0 &&
                    SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field)
                        == QueryConstants.DESC_SEPARATOR_BYTE) {
                    key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE;
                }
            } else {
                // Two-byte DESC terminator for PVarbinaryEncoded (reference equality again).
                if (!isFixedWidth && bytes.length == 0 &&
                    SchemaUtil.getSeparatorBytesForVarBinaryEncoded(
                        schema.rowKeyOrderOptimizable(), false, field.getSortOrder())
                        == QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES) {
                    key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[0];
                    key[offset++] = QueryConstants.DESC_VARBINARY_ENCODED_SEPARATOR_BYTES[1];
                }
            }
        }
        // Advance to the first schema field of the next slot (slots may span multiple fields).
        fieldIndex += slotSpan[i] + 1;
    }
    // Deferred increment: bump the key once if the last slot needed an inclusive-upper or
    // unbound-upper adjustment (see flag comments inside the loop).
    if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey || lastUnboundUpper) {
        if (!ByteUtil.nextKey(key, offset)) {
            // Special case for not being able to increment.
            // In this case we return a negative byteOffset to
            // remove this part from the key being formed. Since the
            // key has overflowed, this means that we should not
            // have an end key specified.
            return -byteOffset;
        }
    }
    // Remove trailing separator bytes, since the columns may have been added
    // after the table has data, in which case there won't be a separator
    // byte.
    // Walks backwards over trailing variable-width ASC fields whose separator is present
    // (hasSeparatorBytes is defined elsewhere in this file), trimming 1 byte per field —
    // or 2 for PVarbinaryEncoded's two-byte separator.
    if (bound == Bound.LOWER) {
        while (--i >= schemaStartIndex && offset > byteOffset && !(field =
            schema.getField(--fieldIndex)).getDataType().isFixedWidth()
            && field.getSortOrder() == SortOrder.ASC && hasSeparatorBytes(key, field, offset)) {
            if (field.getDataType() != PVarbinaryEncoded.INSTANCE) {
                offset--;
                fieldIndex -= slotSpan[i];
            } else {
                offset -= 2;
                fieldIndex -= slotSpan[i];
            }
        }
    }
    // Number of bytes this call appended to the key.
    return offset - byteOffset;
}