in spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala [1150:1204]
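/**
 * Narrows this filter to its intersection with another RowKeyFilter.
 *
 * A point survives if it also appears in the other filter's points or,
 * failing that, if one of this filter's ranges contains it (when this
 * filter has no ranges, all such second-chance points are kept). A range
 * survives as the overlapping portion of each pair of ranges drawn from
 * the two filters.
 *
 * @param other the filter to intersect with
 * @return this filter, mutated in place
 */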
def mergeIntersect(other: RowKeyFilter): RowKeyFilter = {
  val survivingPoints = new mutable.MutableList[Array[Byte]]()
  val didntSurviveFirstPassPoints = new mutable.MutableList[Array[Byte]]()
  if (points == null || points.length == 0) {
    // This filter has no points of its own, so every point from the
    // other filter must qualify through the range pass below.
    other.points.foreach(otherP => {
      didntSurviveFirstPassPoints += otherP
    })
  } else {
    points.foreach(p => {
      // A point present in both filters survives outright. Using exists
      // here (rather than appending inside the inner loop) keeps a point
      // from being added once per non-matching entry in other.points.
      if (other.points.exists(otherP => Bytes.equals(p, otherP))) {
        survivingPoints += p
      } else {
        didntSurviveFirstPassPoints += p
      }
    })
  }
  val survivingRanges = new mutable.MutableList[ScanRange]()
  if (ranges.length == 0) {
    // No ranges to test against, so every first-pass casualty is kept.
    didntSurviveFirstPassPoints.foreach(p => {
      survivingPoints += p
    })
  } else {
    // Only the overlapping portion of each pair of ranges survives.
    ranges.foreach(r => {
      other.ranges.foreach(otherR => {
        val overLapScanRange = r.getOverLapScanRange(otherR)
        if (overLapScanRange != null) {
          survivingRanges += overLapScanRange
        }
      })
    })
    // A first-pass casualty survives if any of this filter's ranges
    // contains it; testing each point once avoids duplicate entries.
    didntSurviveFirstPassPoints.foreach(p => {
      if (ranges.exists(r => r.containsPoint(p))) {
        survivingPoints += p
      }
    })
  }
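  // Swap in the results and return this filter so calls can be chained.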
  points = survivingPoints
  ranges = survivingRanges
  this
}
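
// A minimal usage sketch (not part of the original file): intersecting a
// range-only filter with a point-only filter. The RowKeyFilter and
// ScanRange constructor shapes below are assumptions about the
// surrounding class, shown only to illustrate the call.
//
//   val rangeFilter = new RowKeyFilter(currentRange =
//     new ScanRange(Bytes.toBytes("row009"), true, Bytes.toBytes("row001"), true))
//   val pointFilter = new RowKeyFilter(currentPoint = Bytes.toBytes("row005"))
//   rangeFilter.mergeIntersect(pointFilter)
//   // rangeFilter now holds the single point "row005": it fell inside the
//   // range, while the range itself is dropped because pointFilter has no
//   // ranges to overlap it with.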