processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java [130:165]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  private void initializeBucketColumnPartitioner() {
    List<Integer> indexes = new ArrayList<>();
    List<ColumnSchema> columnSchemas = new ArrayList<>();
    DataField[] inputDataFields = getOutput();
    BucketingInfo bucketingInfo = configuration.getBucketingInfo();
    for (int i = 0; i < inputDataFields.length; i++) {
      for (int j = 0; j < bucketingInfo.getListOfColumns().size(); j++) {
        if (inputDataFields[i].getColumn().getColName()
                .equals(bucketingInfo.getListOfColumns().get(j).getColumnName())) {
          indexes.add(i);
          columnSchemas.add(inputDataFields[i].getColumn().getColumnSchema());
          break;
        }
      }
    }

    // hash partitioner to dispatch rows by bucket column
    if (CarbonCommonConstants.BUCKET_HASH_METHOD_DEFAULT.equals(
            configuration.getBucketHashMethod())) {
      // keeps hashing consistent with both carbon and spark tables.
      this.partitioner = new SparkHashExpressionPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    } else if (CarbonCommonConstants.BUCKET_HASH_METHOD_NATIVE.equals(
            configuration.getBucketHashMethod())) {
      // native hashing is not consistent with spark: it uses the java hashCode
      // of each value (Long, String, etc.) directly, which may perform better
      // during the convert process. But do not use it when the table needs to
      // join with spark bucket tables!
      this.partitioner = new HashPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    } else {
      // by default we use SparkHashExpressionPartitionerImpl to hash.
      this.partitioner = new SparkHashExpressionPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    }
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
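
Both steps wire the collected bucket-column indexes and bucketingInfo.getNumOfRanges() into a partitioner with the same constructor shape. As a minimal sketch of what the "native" dispatch described in the comments amounts to (a hypothetical class, not CarbonData's actual HashPartitionerImpl): pull the bucket-column values out of each row by index, combine their java hashCodes, and map the result onto the configured number of buckets.

import java.util.Arrays;
import java.util.List;

// Hypothetical sketch of the native (java hashCode) dispatch pattern.
class NativeHashPartitionSketch {
  private final List<Integer> indexes; // bucket-column positions in the row
  private final int numBuckets;        // bucketingInfo.getNumOfRanges()

  NativeHashPartitionSketch(List<Integer> indexes, int numBuckets) {
    this.indexes = indexes;
    this.numBuckets = numBuckets;
  }

  // Pick the bucket-column values and derive a bucket id from their hashCodes.
  int getPartition(Object[] row) {
    Object[] keys = new Object[indexes.size()];
    for (int i = 0; i < indexes.size(); i++) {
      keys[i] = row[indexes.get(i)];
    }
    // floorMod keeps the bucket id non-negative when the combined hash is negative.
    return Math.floorMod(Arrays.hashCode(keys), numBuckets);
  }
}
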



processing/src/main/java/org/apache/carbondata/processing/loading/steps/DataConverterProcessorStepImpl.java [93:128]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  private void initializeBucketColumnPartitioner() {
    List<Integer> indexes = new ArrayList<>();
    List<ColumnSchema> columnSchemas = new ArrayList<>();
    DataField[] inputDataFields = getOutput();
    BucketingInfo bucketingInfo = configuration.getBucketingInfo();
    for (int i = 0; i < inputDataFields.length; i++) {
      for (int j = 0; j < bucketingInfo.getListOfColumns().size(); j++) {
        if (inputDataFields[i].getColumn().getColName()
            .equals(bucketingInfo.getListOfColumns().get(j).getColumnName())) {
          indexes.add(i);
          columnSchemas.add(inputDataFields[i].getColumn().getColumnSchema());
          break;
        }
      }
    }

    // hash partitioner to dispatch rows by bucket column
    if (CarbonCommonConstants.BUCKET_HASH_METHOD_DEFAULT.equals(
        configuration.getBucketHashMethod())) {
      // keeps hashing consistent with both carbon and spark tables.
      this.partitioner = new SparkHashExpressionPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    } else if (CarbonCommonConstants.BUCKET_HASH_METHOD_NATIVE.equals(
        configuration.getBucketHashMethod())) {
      // native hashing is not consistent with spark: it uses the java hashCode
      // of each value (Long, String, etc.) directly, which may perform better
      // during the convert process. But do not use it when the table needs to
      // join with spark bucket tables!
      this.partitioner = new HashPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    } else {
      // by default we use SparkHashExpressionPartitionerImpl to hash.
      this.partitioner = new SparkHashExpressionPartitionerImpl(
              indexes, columnSchemas, bucketingInfo.getNumOfRanges());
    }
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
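
The warning against joining native-hashed tables with spark bucket tables follows from the two hash families simply disagreeing: java's Long.hashCode is value ^ (value >>> 32), while spark derives bucket ids from a Murmur3-style hash (seed 42, to the best of my knowledge). The self-contained sketch below uses the standard Murmur3_x86_32 mixing constants to show identical keys landing in different buckets under the two schemes; it is an illustration, not CarbonData's SparkHashExpressionPartitionerImpl.

// Hypothetical comparison: java native hash vs murmur3-style hash for a long key.
class BucketHashCompareSketch {
  private static int mixK1(int k1) {
    k1 *= 0xcc9e2d51;
    k1 = Integer.rotateLeft(k1, 15);
    return k1 * 0x1b873593;
  }

  private static int mixH1(int h1, int k1) {
    h1 ^= k1;
    h1 = Integer.rotateLeft(h1, 13);
    return h1 * 5 + 0xe6546b64;
  }

  // Standard Murmur3_x86_32 over a long: two 32-bit blocks plus finalization.
  static int murmur3HashLong(long input, int seed) {
    int h1 = mixH1(seed, mixK1((int) input));
    h1 = mixH1(h1, mixK1((int) (input >>> 32)));
    h1 ^= 8; // total length in bytes
    h1 ^= h1 >>> 16;
    h1 *= 0x85ebca6b;
    h1 ^= h1 >>> 13;
    h1 *= 0xc2b2ae35;
    return h1 ^ (h1 >>> 16);
  }

  public static void main(String[] args) {
    int numBuckets = 8;
    for (long key = 0; key < 4; key++) {
      int nativeBucket = Math.floorMod(Long.hashCode(key), numBuckets);
      int murmurBucket = Math.floorMod(murmur3HashLong(key, 42), numBuckets);
      System.out.printf("key=%d native=%d murmur=%d%n", key, nativeBucket, murmurBucket);
    }
  }
}

Because the same key maps to different bucket ids under the two schemes, a table loaded with the native method cannot participate in a shuffle-free bucket join against a spark-bucketed table, which is exactly what the comments caution against.
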



