override def filter()

in spark/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/NaiveEncoder.scala [230:260]


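  /**
   * Compares a cell value against an encoded filter value. The first byte of
   * filterBytes is a type tag written by the encoder: numeric types are
   * decoded and compared numerically, while all other types fall back to a
   * raw byte-array comparison. The comparison result is mapped to a Boolean
   * by compare(c, ops) according to the requested operator.
   */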
  override def filter(input: Array[Byte], offset1: Int, length1: Int,
                      filterBytes: Array[Byte], offset2: Int, length2: Int,
                      ops: JavaBytesEncoder): Boolean = {
    filterBytes(offset2) match {
      case ShortEnc =>
        // Numeric types: decode both sides (skipping the one-byte type tag
        // in filterBytes) and compare numerically.
        val in = Bytes.toShort(input, offset1)
        val value = Bytes.toShort(filterBytes, offset2 + 1)
        compare(in.compareTo(value), ops)
      case IntEnc =>
        val in = Bytes.toInt(input, offset1)
        val value = Bytes.toInt(filterBytes, offset2 + 1)
        compare(in.compareTo(value), ops)
      case LongEnc | TimestampEnc =>
        // Timestamps are encoded as longs, so both tags share one branch.
        val in = Bytes.toLong(input, offset1)
        val value = Bytes.toLong(filterBytes, offset2 + 1)
        compare(in.compareTo(value), ops)
      case FloatEnc =>
        val in = Bytes.toFloat(input, offset1)
        val value = Bytes.toFloat(filterBytes, offset2 + 1)
        compare(in.compareTo(value), ops)
      case DoubleEnc =>
        val in = Bytes.toDouble(input, offset1)
        val value = Bytes.toDouble(filterBytes, offset2 + 1)
        compare(in.compareTo(value), ops)
      case _ =>
        // For String, Byte, Binary, Boolean, and other types, the raw
        // byte-array ordering matches the value ordering, so the bytes can
        // be compared directly; offset2 + 1 and length2 - 1 skip the tag.
        compare(
          Bytes.compareTo(input, offset1, length1, filterBytes, offset2 + 1, length2 - 1), ops)
    }
  }
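
To make the tag-plus-payload layout concrete, here is a minimal, self-contained sketch of the contract this method relies on. The tag value, encodeIntFilter, and intLessThan are hypothetical stand-ins (the real tag constants live in NaiveEncoder, and the real method maps results through compare(c, ops)); java.nio.ByteBuffer is used in place of HBase's Bytes utility. It also shows why the numeric branches decode instead of reusing Bytes.compareTo: negative two's-complement values sort above positives in raw byte order.

import java.nio.ByteBuffer

object FilterLayoutSketch {
  // Hypothetical tag value; NaiveEncoder defines its own constants, and the
  // concrete number here is an assumption for illustration only.
  val IntEnc: Byte = 2

  // Builds filterBytes in the layout filter() expects: one type-tag byte
  // followed by the big-endian bytes of the value, matching what
  // Bytes.toInt(filterBytes, offset2 + 1) reads on the other side.
  def encodeIntFilter(v: Int): Array[Byte] =
    ByteBuffer.allocate(1 + 4).put(IntEnc).putInt(v).array()

  // Stand-in for the IntEnc branch under a LessThan operator: decode both
  // sides, then compare numerically.
  def intLessThan(input: Array[Byte], filterBytes: Array[Byte]): Boolean = {
    require(filterBytes(0) == IntEnc, "unexpected type tag")
    val in = ByteBuffer.wrap(input).getInt
    val value = ByteBuffer.wrap(filterBytes, 1, 4).getInt
    in.compareTo(value) < 0
  }

  def main(args: Array[String]): Unit = {
    // -1 encodes as 0xFFFFFFFF, which a raw unsigned-byte comparison (what
    // Bytes.compareTo does) would sort above 1 (0x00000001). Decoding first
    // yields the correct numeric answer: -1 < 1.
    val cell = ByteBuffer.allocate(4).putInt(-1).array()
    assert(intLessThan(cell, encodeIntFilter(1)))
  }
}

Per-type decoding keeps the filter correct for signed numerics, while types whose encodings are already order-preserving (strings, booleans, raw binary) skip the decode and fall through to the direct byte comparison in the default case.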