amoro-format-mixed/amoro-mixed-spark/v3.5/amoro-mixed-spark-3.5/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [119:190]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a writer for the base store of the mixed-format table.
   *
   * @param isOverwrite whether this write overwrites existing data; for hive tables an
   *     overwrite routes output through the hive-aware file factory
   * @return a task writer producing base-store files for Spark {@code InternalRow}s
   */
  public TaskWriter<InternalRow> newBaseWriter(boolean isOverwrite) {
    preconditions();

    // Resolve location, encryption, schema and (keyed tables only) the primary
    // key spec from whichever table variant this builder holds.
    final String baseLocation;
    final EncryptionManager encryptionManager;
    final Schema schema;
    final Table icebergTable;
    PrimaryKeySpec primaryKeySpec = null;

    if (table.isKeyedTable()) {
      KeyedTable keyed = table.asKeyedTable();
      baseLocation = keyed.baseLocation();
      encryptionManager = keyed.baseTable().encryption();
      schema = keyed.baseTable().schema();
      primaryKeySpec = keyed.primaryKeySpec();
      icebergTable = keyed.baseTable();
    } else {
      // Named "unkeyed" to avoid shadowing the enclosing field "table".
      UnkeyedTable unkeyed = this.table.asUnkeyedTable();
      baseLocation = unkeyed.location();
      encryptionManager = unkeyed.encryption();
      schema = unkeyed.schema();
      icebergTable = unkeyed;
    }

    FileAppenderFactory<InternalRow> appenderFactory =
        InternalRowFileAppenderFactory.builderFor(icebergTable, schema, dsSchema)
            .writeHive(isHiveTable)
            .build();

    boolean hiveConsistentWrite =
        PropertyUtil.propertyAsBoolean(
            table.properties(),
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED,
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED_DEFAULT);

    // Hive-table overwrites must land under the hive location via the
    // hive-adapted factory; every other write targets the base location.
    final OutputFileFactory outputFileFactory =
        isHiveTable && isOverwrite
            ? new AdaptHiveOutputFileFactory(
                ((SupportHive) table).hiveLocation(),
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId,
                hiveSubdirectory,
                hiveConsistentWrite)
            : new CommonOutputFileFactory(
                baseLocation,
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId);

    return new SparkBaseTaskWriter(
        fileFormat,
        appenderFactory,
        outputFileFactory,
        table.io(),
        fileSize,
        mask,
        schema,
        table.spec(),
        primaryKeySpec,
        orderedWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [119:190]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a writer for the base store of the mixed-format table.
   *
   * @param isOverwrite whether this write overwrites existing data; for hive tables an
   *     overwrite routes output through the hive-aware file factory
   * @return a task writer producing base-store files for Spark {@code InternalRow}s
   */
  public TaskWriter<InternalRow> newBaseWriter(boolean isOverwrite) {
    preconditions();

    // Resolve location, encryption, schema and (keyed tables only) the primary
    // key spec from whichever table variant this builder holds.
    final String baseLocation;
    final EncryptionManager encryptionManager;
    final Schema schema;
    final Table icebergTable;
    PrimaryKeySpec primaryKeySpec = null;

    if (table.isKeyedTable()) {
      KeyedTable keyed = table.asKeyedTable();
      baseLocation = keyed.baseLocation();
      encryptionManager = keyed.baseTable().encryption();
      schema = keyed.baseTable().schema();
      primaryKeySpec = keyed.primaryKeySpec();
      icebergTable = keyed.baseTable();
    } else {
      // Named "unkeyed" to avoid shadowing the enclosing field "table".
      UnkeyedTable unkeyed = this.table.asUnkeyedTable();
      baseLocation = unkeyed.location();
      encryptionManager = unkeyed.encryption();
      schema = unkeyed.schema();
      icebergTable = unkeyed;
    }

    FileAppenderFactory<InternalRow> appenderFactory =
        InternalRowFileAppenderFactory.builderFor(icebergTable, schema, dsSchema)
            .writeHive(isHiveTable)
            .build();

    boolean hiveConsistentWrite =
        PropertyUtil.propertyAsBoolean(
            table.properties(),
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED,
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED_DEFAULT);

    // Hive-table overwrites must land under the hive location via the
    // hive-adapted factory; every other write targets the base location.
    final OutputFileFactory outputFileFactory =
        isHiveTable && isOverwrite
            ? new AdaptHiveOutputFileFactory(
                ((SupportHive) table).hiveLocation(),
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId,
                hiveSubdirectory,
                hiveConsistentWrite)
            : new CommonOutputFileFactory(
                baseLocation,
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId);

    return new SparkBaseTaskWriter(
        fileFormat,
        appenderFactory,
        outputFileFactory,
        table.io(),
        fileSize,
        mask,
        schema,
        table.spec(),
        primaryKeySpec,
        orderedWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [119:190]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a writer for the base store of the mixed-format table.
   *
   * @param isOverwrite whether this write overwrites existing data; for hive tables an
   *     overwrite routes output through the hive-aware file factory
   * @return a task writer producing base-store files for Spark {@code InternalRow}s
   */
  public TaskWriter<InternalRow> newBaseWriter(boolean isOverwrite) {
    preconditions();

    // Resolve location, encryption, schema and (keyed tables only) the primary
    // key spec from whichever table variant this builder holds.
    final String baseLocation;
    final EncryptionManager encryptionManager;
    final Schema schema;
    final Table icebergTable;
    PrimaryKeySpec primaryKeySpec = null;

    if (table.isKeyedTable()) {
      KeyedTable keyed = table.asKeyedTable();
      baseLocation = keyed.baseLocation();
      encryptionManager = keyed.baseTable().encryption();
      schema = keyed.baseTable().schema();
      primaryKeySpec = keyed.primaryKeySpec();
      icebergTable = keyed.baseTable();
    } else {
      // Named "unkeyed" to avoid shadowing the enclosing field "table".
      UnkeyedTable unkeyed = this.table.asUnkeyedTable();
      baseLocation = unkeyed.location();
      encryptionManager = unkeyed.encryption();
      schema = unkeyed.schema();
      icebergTable = unkeyed;
    }

    FileAppenderFactory<InternalRow> appenderFactory =
        InternalRowFileAppenderFactory.builderFor(icebergTable, schema, dsSchema)
            .writeHive(isHiveTable)
            .build();

    boolean hiveConsistentWrite =
        PropertyUtil.propertyAsBoolean(
            table.properties(),
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED,
            HiveTableProperties.HIVE_CONSISTENT_WRITE_ENABLED_DEFAULT);

    // Hive-table overwrites must land under the hive location via the
    // hive-adapted factory; every other write targets the base location.
    final OutputFileFactory outputFileFactory =
        isHiveTable && isOverwrite
            ? new AdaptHiveOutputFileFactory(
                ((SupportHive) table).hiveLocation(),
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId,
                hiveSubdirectory,
                hiveConsistentWrite)
            : new CommonOutputFileFactory(
                baseLocation,
                table.spec(),
                fileFormat,
                table.io(),
                encryptionManager,
                partitionId,
                taskId,
                transactionId);

    return new SparkBaseTaskWriter(
        fileFormat,
        appenderFactory,
        outputFileFactory,
        table.io(),
        fileSize,
        mask,
        schema,
        table.spec(),
        primaryKeySpec,
        orderedWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



