in modules/accumulo/src/main/java/org/apache/fluo/accumulo/iterators/PrewriteIterator.java [93:246]
/**
 * Seeks over a single row+column and decides whether anything in it conflicts with a prewrite
 * attempt at {@code snaptime}. On the first conflicting entry the method sets
 * {@code hasTop = true} and returns; if the scan finishes without finding one, {@code hasTop}
 * stays false. Conflicts detected below: a write or resolved lock at/after the snapshot time,
 * an outstanding lock or active read lock not superseded by a newer commit, and (when
 * {@code checkAck}) an ack newer than {@code ntfyTimestamp}.
 *
 * @param range expected to start at the row/column being prewritten; only entries matching that
 *        exact row+family+qualifier+visibility are examined (see the loop guard)
 * @param columnFamilies column family filter from the caller
 * @param inclusive whether {@code columnFamilies} is an inclusion (true) or exclusion (false) set
 * @throws IOException if the underlying source iterator fails
 */
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
    throws IOException {
  Collection<ByteSequence> fams;
  if (columnFamilies.isEmpty() && !inclusive) {
    // Caller asked for all column families; substitute the notification family as an exclusion
    // set so notification entries are never examined by this iterator.
    fams = SnapshotIterator.NOTIFY_CF_SET;
    inclusive = false; // already false here; kept explicit to document the exclusion semantics
  } else {
    fams = columnFamilies;
  }

  // Bound the seek to the timestamp prefixes of interest for the start key's column. The
  // timestamp's high bits encode a column type; when acks matter the range must extend through
  // the ACK entries (stop just before DATA), otherwise it can stop before ACK.
  Key endKey = new Key(range.getStartKey());
  if (checkAck) {
    endKey.setTimestamp(ColumnType.DATA.first());
  } else {
    endKey.setTimestamp(ColumnType.ACK.first());
  }
  // Tried seeking directly to WRITE_PREFIX, however this did not work well because of how
  // TimestampSkippingIterator currently works. Currently, it can not remove the deleting iterator
  // until after the first seek.
  Range seekRange = new Range(range.getStartKey(), true, endKey, false);

  source.seek(seekRange, fams, inclusive);
  hasTop = false;

  // Highest commit/delete-lock timestamp seen so far; a lock at or below this time has been
  // superseded by a later commit and is therefore not a conflict.
  long invalidationTime = -1;

  // Walk entries belonging to this exact row+family+qualifier+visibility only.
  while (source.hasTop() && seekRange.getStartKey().equals(source.getTopKey(),
      PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
    ColumnType colType = ColumnType.from(source.getTopKey());
    // Strip the column-type prefix bits, leaving the logical timestamp.
    long ts = source.getTopKey().getTimestamp() & ColumnConstants.TIMESTAMP_MASK;
    switch (colType) {
      case TX_DONE: {
        // TX_DONE entries carry no conflict information for a prewrite; jump to WRITE entries.
        // tried to make 1st seek go to WRITE_PREFIX, but this did not allow the DeleteIterator to
        // be removed from the stack so it was slower.
        source.skipToPrefix(seekRange.getStartKey(), ColumnType.WRITE);
        break;
      }
      case WRITE: {
        // The write's value points at its commit timestamp; track the newest such pointer so
        // older locks encountered later can be recognized as superseded.
        long timePtr = WriteValue.getTimestamp(source.getTopValue().get());
        if (timePtr > invalidationTime) {
          invalidationTime = timePtr;
        }
        if (ts >= snaptime) {
          // A write at or after our snapshot time: write-write conflict.
          hasTop = true;
          return;
        }
        // Only the first WRITE entry is checked before skipping ahead — NOTE(review): this
        // appears to rely on entries sorting newest-first within the prefix; confirm.
        source.skipToPrefix(seekRange.getStartKey(), ColumnType.DEL_LOCK);
        break;
      }
      case DEL_LOCK: {
        // A deleted (resolved) lock; only one newer than anything seen so far matters.
        if (ts > invalidationTime) {
          invalidationTime = ts;
          if (ts >= snaptime) {
            // Lock resolved at or after our snapshot time: conflict.
            hasTop = true;
            return;
          }
        }
        if (readlock) {
          // Acquiring a read lock: skip the RLOCK section entirely and go straight to LOCK —
          // presumably read locks do not conflict with each other; verify against callers.
          source.skipToPrefix(seekRange.getStartKey(), ColumnType.LOCK);
        } else {
          // Acquiring a write lock: read locks must be inspected next.
          source.skipToPrefix(seekRange.getStartKey(), ColumnType.RLOCK);
        }
        break;
      }
      case RLOCK: {
        long lastDeleteTs = -1;
        long rlts = ReadLockUtil.decodeTs(ts);
        if (!readlock) {
          // Write-lock attempt: scan read-lock entries until they are no newer than the
          // invalidation time or a different column type is reached. The loop mutates
          // colType/ts/rlts in lock-step with source.next().
          while (rlts > invalidationTime && colType == ColumnType.RLOCK) {
            if (ReadLockUtil.isDelete(ts)) {
              // ignore rolled back read locks, these should never prevent a write lock
              if (!DelReadLockValue.isRollback(source.getTopValue().get())) {
                if (rlts >= snaptime) {
                  // Read lock resolved at/after our snapshot time: conflict.
                  hasTop = true;
                  return;
                } else {
                  long rlockCommitTs =
                      DelReadLockValue.getCommitTimestamp(source.getTopValue().get());
                  if (rlockCommitTs > snaptime) {
                    // Read lock committed after our snapshot time: conflict.
                    hasTop = true;
                    return;
                  }
                }
              }
              // Remember the delete's timestamp so the matching RLOCK entry encountered below
              // is skipped (this also covers the entry paired with a rolled-back delete).
              lastDeleteTs = rlts;
            } else {
              if (rlts != lastDeleteTs) {
                // this read lock is active
                hasTop = true;
                return;
              }
            }
            source.next();
            if (source.hasTop()) {
              colType = ColumnType.from(source.getTopKey());
              ts = source.getTopKey().getTimestamp() & ColumnConstants.TIMESTAMP_MASK;
              rlts = ReadLockUtil.decodeTs(ts);
            } else {
              break;
            }
          }
        }
        // Done with read locks (either scanned past them or acquiring a read lock ourselves);
        // continue at the LOCK entries if still positioned within RLOCK.
        if (source.hasTop() && (colType == ColumnType.RLOCK)) {
          source.skipToPrefix(seekRange.getStartKey(), ColumnType.LOCK);
        }
        break;
      }
      case LOCK: {
        if (ts > invalidationTime) {
          // nothing supersedes this lock, therefore the column is locked
          hasTop = true;
          return;
        }
        if (checkAck) {
          source.skipToPrefix(seekRange.getStartKey(), ColumnType.ACK);
        } else {
          // only ack and data left and not interested in either so stop looking
          return;
        }
        break;
      }
      case DATA: {
        // can stop looking
        return;
      }
      case ACK: {
        if (checkAck && ts > ntfyTimestamp) {
          // An ack newer than the notification timestamp — NOTE(review): inferred to mean the
          // notification was already acknowledged, hence reported as a conflict; confirm.
          hasTop = true;
          return;
        } else {
          // nothing else to look at in this column
          return;
        }
      }
      default:
        // Unexpected column type within the prewrite timestamp range.
        throw new IllegalArgumentException();
    }
  }
}