in solr/core/src/java/org/apache/solr/uninverting/DocTermOrds.java [280:548]
protected void uninvert(final LeafReader reader, Bits liveDocs, final BytesRef termPrefix)
throws IOException {
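// Uninverting builds ords from the inverted index; a field that actually has
// docValues should be read through the DocValues APIs instead, so fail fast
// on a type mismatch.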
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (checkForDocValues && info != null && info.getDocValuesType() != DocValuesType.NONE) {
throw new IllegalStateException(
"Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
}
// System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
final long startTime = System.nanoTime();
prefix = termPrefix == null ? null : BytesRef.deepCopyOf(termPrefix);
final int maxDoc = reader.maxDoc();
// immediate term numbers, or the index into the byte[] representing the last number
final int[] index = new int[maxDoc];
// last term we saw for this document
final int[] lastTerm = new int[maxDoc];
// list of term numbers for the doc (delta encoded vInts)
final byte[][] bytes = new byte[maxDoc][];
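//
// Intermediate encoding, per document:
// - high bit of index[doc] clear: the delta-coded term list (at most 4 vInt
// bytes) is packed directly into index[doc];
// - high bit set: the low 31 bits are the *end* offset of the list being
// built inside bytes[doc].
// Example (TNUM_OFFSET is 2, since values 0 and 1 are reserved, see below):
// a doc whose terms have term numbers 3, 7, 12 stores the deltas
// 3+2, 4+2, 5+2 = vInt bytes 5, 6, 7.
//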
final Terms terms = reader.terms(field);
if (terms == null) {
// No terms
return;
}
final TermsEnum te = terms.iterator();
final BytesRef seekStart = termPrefix != null ? termPrefix : new BytesRef();
// System.out.println("seekStart=" + seekStart.utf8ToString());
if (te.seekCeil(seekStart) == TermsEnum.SeekStatus.END) {
// No terms match
return;
}
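// seekCeil left te positioned on the first term >= seekStart, so the main
// loop below can read terms immediately and advance with te.next().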
// For our "term index wrapper"
final List<BytesRef> indexedTerms = new ArrayList<>();
final PagedBytes indexedTermsBytes = new PagedBytes(15);
// We need a minimum of 9 bytes of scratch: up to 4 bytes already packed in
// index[doc] plus up to 5 bytes for one vInt-encoded delta. Round up to 12
// since the extra space would be wasted with most allocators anyway.
byte[] tempArr = new byte[12];
//
// enumerate all terms, and build an intermediate form of the un-inverted field.
//
// During this intermediate form, every document has a (potential) byte[]
// and the int[maxDoc()] array either contains the termNumber list directly
// or the *end* offset of the termNumber list in its byte array (for faster
// appending and faster creation of the final form).
//
// idea... if things are too large while building, we could do a range of docs
// at a time (but it would be a fair amount slower to build)
// could also do ranges in parallel to take advantage of multiple CPUs
// OPTIONAL: remap the largest df terms to the lowest 128 (single byte)
// values. This requires going over the field first to find the most
// frequent terms ahead of time.
int termNum = 0;
postingsEnum = null;
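// A single PostingsEnum instance is reused across all terms below via
// te.postings(postingsEnum, ...).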
// Loop begins with te positioned to first term (we call
// seek above):
for (; ; ) {
final BytesRef t = te.term();
if (t == null || (termPrefix != null && !StringHelper.startsWith(t, termPrefix))) {
break;
}
// System.out.println("visit term=" + t.utf8ToString() + " " + t + " termNum=" + termNum);
visitTerm(te, termNum);
if ((termNum & indexIntervalMask) == 0) {
// Index this term
sizeOfIndexedStrings += t.length;
BytesRef indexedTerm = new BytesRef();
indexedTermsBytes.copy(t, indexedTerm);
// TODO: really should 1) strip off useless suffix,
// and 2) use FST not array/PagedBytes
indexedTerms.add(indexedTerm);
}
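// Every (indexIntervalMask + 1)-th term was just sampled into indexedTerms;
// the sampled terms later let a term be located from its ord with a coarse
// binary search followed by a short forward scan of the TermsEnum.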
final int df = te.docFreq();
if (df <= maxTermDocFreq) {
postingsEnum = te.postings(postingsEnum, PostingsEnum.NONE);
// df, but taking deletions into account
int actualDF = 0;
for (; ; ) {
int doc = postingsEnum.nextDoc();
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
break;
}
if (liveDocs != null && !liveDocs.get(doc)) {
// Skip deleted docs so actualDF really does account for deletions;
// this is what the otherwise-unused liveDocs parameter is for.
continue;
}
actualDF++;
termInstances++;
// add TNUM_OFFSET to the term number to make room for special reserved values:
// 0 (end term) and 1 (index into byte array follows)
int delta = termNum - lastTerm[doc] + TNUM_OFFSET;
lastTerm[doc] = termNum;
int val = index[doc];
if ((val & 0x80000000) != 0) {
// index into byte array (actually the end of the doc-specific byte[] when building)
int pos = val & 0x7fffffff;
int ilen = vIntSize(delta);
byte[] arr = bytes[doc];
int newend = pos + ilen;
if (newend > arr.length) {
// Grow to the exact 4-byte-aligned size instead of doubling, to keep memory
// usage low: this faceting method isn't meant for docs with many terms.
// In HotSpot, objects have 2 words of overhead, then fields, rounded up to a
// 64-bit boundary.
// TODO: figure out what array lengths we can round up to w/o actually using
// more memory (how much space does a byte[] take up? Is the data preceded by
// a 32-bit length only?). It should be safe to round up to the nearest
// 32 bits in any case.
int newLen = (newend + 3) & 0xfffffffc; // 4-byte alignment
byte[] newarr = new byte[newLen];
System.arraycopy(arr, 0, newarr, 0, pos);
arr = newarr;
bytes[doc] = newarr;
}
pos = writeInt(delta, arr, pos);
index[doc] = pos | 0x80000000; // update pointer to end index in byte[]
} else {
// This int packs the vInt bytes directly. Find the end: the first zero byte
// that starts a new value, i.e. one not preceded by a byte with its high
// (continuation) bit set.
int ipos;
if (val == 0) {
ipos = 0;
} else if ((val & 0x0000ff80) == 0) {
ipos = 1;
} else if ((val & 0x00ff8000) == 0) {
ipos = 2;
} else if ((val & 0xff800000) == 0) {
ipos = 3;
} else {
ipos = 4;
}
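// Each mask above tests two things at once: that byte `ipos` of val is zero
// AND that the high (continuation) bit of the preceding byte is clear, so a
// zero byte that is merely the final byte of a multi-byte vInt is not
// mistaken for the end of the list.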
// System.out.println(" ipos=" + ipos);
int endPos = writeInt(delta, tempArr, ipos);
// System.out.println(" endpos=" + endPos);
if (endPos <= 4) {
// System.out.println(" fits!");
// value will fit in the integer... move bytes back
for (int j = ipos; j < endPos; j++) {
val |= (tempArr[j] & 0xff) << (j << 3);
}
index[doc] = val;
} else {
// value won't fit... move integer into byte[]
for (int j = 0; j < ipos; j++) {
tempArr[j] = (byte) val;
val >>>= 8;
}
// point at the end index in the byte[]
index[doc] = endPos | 0x80000000;
bytes[doc] = tempArr; // hand the scratch buffer to this doc
tempArr = new byte[12]; // and start a fresh one for the next overflow
}
}
}
setActualDocFreq(termNum, actualDF);
}
termNum++;
if (te.next() == null) {
break;
}
}
numTermsInField = termNum;
long midPoint = System.nanoTime();
if (termInstances == 0) {
// We didn't invert anything; drop tnums to lower memory consumption.
tnums = null;
} else {
this.index = index;
//
// transform intermediate form into the final form, building a single byte[]
// at a time, and releasing the intermediate byte[]s as we go to avoid
// increasing the memory footprint.
//
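// Final form: a doc's list lives in tnums[(doc >> 16) & 0xff]. index[doc]
// then holds either the packed bytes themselves (high bit clear) or
// 0x80000000 | startOffset of the zero-terminated vInt run in that array.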
for (int pass = 0; pass < 256; pass++) {
byte[] target = tnums[pass];
int pos = 0; // end of the used region in target
if (target != null) {
pos = target.length;
} else {
target = new byte[4096];
}
// Loop over documents 0x00ppxxxx, 0x01ppxxxx, 0x02ppxxxx, ... where pp is
// the pass (which tnums[] array we are building) and xxxx runs over all
// values: every doc with ((doc >> 16) & 0xff) == pass appends its
// termNumber list to the same byte[].
for (int docbase = pass << 16; docbase < maxDoc; docbase += (1 << 24)) {
int lim = Math.min(docbase + (1 << 16), maxDoc);
for (int doc = docbase; doc < lim; doc++) {
// System.out.println(" pass=" + pass + " process docID=" + doc);
int val = index[doc];
if ((val & 0x80000000) != 0) {
int len = val & 0x7fffffff;
// System.out.println(" ptr pos=" + pos);
// index[doc] = (pos<<8)|1; // change index to point to start of array
index[doc] = pos | 0x80000000; // change index to point to start of array
byte[] arr = bytes[doc];
bytes[doc] = null; // IMPORTANT: allow GC to avoid OOM
if (target.length <= pos + len) {
int newlen = target.length;
while (newlen <= pos + len) {
if ((newlen <<= 1) < 0) { // double until overflow
// ArrayList's MAX_ARRAY_SIZE is Integer.MAX_VALUE - 8; use -16 to be sure.
newlen = Integer.MAX_VALUE - 16;
if (newlen <= pos + len) {
throw new IllegalStateException(
"Too many terms (> Integer.MAX_VALUE-16) to uninvert field '"
+ field
+ "'");
}
}
}
byte[] newtarget = new byte[newlen];
System.arraycopy(target, 0, newtarget, 0, pos);
target = newtarget;
}
System.arraycopy(arr, 0, target, pos, len);
pos += len + 1; // skip single byte at end and leave it 0 for terminator
}
}
}
// shrink array
if (pos < target.length) {
byte[] newtarget = new byte[pos];
System.arraycopy(target, 0, newtarget, 0, pos);
target = newtarget;
}
tnums[pass] = target;
if ((pass << 16) > maxDoc) break;
}
}
indexedTermsArray = indexedTerms.toArray(new BytesRef[0]);
long endTime = System.nanoTime();
total_time = (int) TimeUnit.MILLISECONDS.convert(endTime - startTime, TimeUnit.NANOSECONDS);
phase1_time = (int) TimeUnit.MILLISECONDS.convert(midPoint - startTime, TimeUnit.NANOSECONDS);
}
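
// A minimal decode sketch for the final form (illustrative only; the actual
// decoding lives in this class's ords iterator, and TNUM_OFFSET is 2):
//
//   int v = index[doc];
//   if ((v & 0x80000000) != 0) { // pointer form
//     byte[] arr = tnums[(doc >>> 16) & 0xff];
//     int pos = v & 0x7fffffff; // start offset (set in the transform above)
//     int tnum = 0;
//     for (;;) {
//       int delta = 0;
//       byte b;
//       do { // big-endian 7-bit groups; high bit means "more bytes follow"
//         b = arr[pos++];
//         delta = (delta << 7) | (b & 0x7f);
//       } while ((b & 0x80) != 0);
//       if (delta == 0) break; // a bare 0 byte terminates the list
//       tnum += delta - TNUM_OFFSET; // undo the offset and the delta coding
//       // tnum is the next term number (ord) for this doc
//     }
//   }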