amoro-format-mixed/amoro-mixed-spark/v3.5/amoro-mixed-spark-3.5/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [241:282]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a {@link TaskWriter} that performs upsert-style writes into an unkeyed table.
   *
   * <p>Wires together an appender factory, an output-file factory and a base task writer,
   * all configured from the table's properties and this builder's task context
   * (partitionId, taskId, transactionId).
   */
  public TaskWriter<InternalRow> newUnkeyedUpsertWriter() {
    preconditions();
    Schema schema = table.schema();

    // Factory that turns Spark InternalRow records into data-file appenders.
    InternalRowFileAppenderFactory appenderFactory =
        new InternalRowFileAppenderFactory.Builder(table.asUnkeyedTable(), schema, dsSchema)
            .build();

    // Target data-file size, taken from table properties with the standard default.
    long targetFileSizeBytes =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES,
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);

    // (bucketCount - 1) is used as the file-index mask — presumably the bucket
    // count is a power of two; TODO(review): confirm that invariant upstream.
    long bucketCount =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET,
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET_DEFAULT);
    long mask = bucketCount - 1;

    CommonOutputFileFactory outputFileFactory =
        new CommonOutputFileFactory(
            table.location(),
            table.spec(),
            fileFormat,
            table.io(),
            table.asUnkeyedTable().encryption(),
            partitionId,
            taskId,
            transactionId);

    // Base writer handles plain inserts; the upsert writer layers deletes on top.
    SparkBaseTaskWriter baseWriter =
        new SparkBaseTaskWriter(
            fileFormat,
            appenderFactory,
            outputFileFactory,
            table.io(),
            targetFileSizeBytes,
            mask,
            schema,
            table.spec(),
            null,
            orderedWriter);

    return new UnkeyedUpsertSparkWriter<>(
        table, appenderFactory, outputFileFactory, fileFormat, schema, baseWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-3.2/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [241:282]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a {@link TaskWriter} that performs upsert-style writes into an unkeyed table.
   *
   * <p>Wires together an appender factory, an output-file factory and a base task writer,
   * all configured from the table's properties and this builder's task context
   * (partitionId, taskId, transactionId).
   */
  public TaskWriter<InternalRow> newUnkeyedUpsertWriter() {
    preconditions();
    Schema schema = table.schema();

    // Factory that turns Spark InternalRow records into data-file appenders.
    InternalRowFileAppenderFactory appenderFactory =
        new InternalRowFileAppenderFactory.Builder(table.asUnkeyedTable(), schema, dsSchema)
            .build();

    // Target data-file size, taken from table properties with the standard default.
    long targetFileSizeBytes =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES,
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);

    // (bucketCount - 1) is used as the file-index mask — presumably the bucket
    // count is a power of two; TODO(review): confirm that invariant upstream.
    long bucketCount =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET,
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET_DEFAULT);
    long mask = bucketCount - 1;

    CommonOutputFileFactory outputFileFactory =
        new CommonOutputFileFactory(
            table.location(),
            table.spec(),
            fileFormat,
            table.io(),
            table.asUnkeyedTable().encryption(),
            partitionId,
            taskId,
            transactionId);

    // Base writer handles plain inserts; the upsert writer layers deletes on top.
    SparkBaseTaskWriter baseWriter =
        new SparkBaseTaskWriter(
            fileFormat,
            appenderFactory,
            outputFileFactory,
            table.io(),
            targetFileSizeBytes,
            mask,
            schema,
            table.spec(),
            null,
            orderedWriter);

    return new UnkeyedUpsertSparkWriter<>(
        table, appenderFactory, outputFileFactory, fileFormat, schema, baseWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/src/main/java/org/apache/amoro/spark/io/TaskWriters.java [241:282]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  /**
   * Creates a {@link TaskWriter} that performs upsert-style writes into an unkeyed table.
   *
   * <p>Wires together an appender factory, an output-file factory and a base task writer,
   * all configured from the table's properties and this builder's task context
   * (partitionId, taskId, transactionId).
   */
  public TaskWriter<InternalRow> newUnkeyedUpsertWriter() {
    preconditions();
    Schema schema = table.schema();

    // Factory that turns Spark InternalRow records into data-file appenders.
    InternalRowFileAppenderFactory appenderFactory =
        new InternalRowFileAppenderFactory.Builder(table.asUnkeyedTable(), schema, dsSchema)
            .build();

    // Target data-file size, taken from table properties with the standard default.
    long targetFileSizeBytes =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES,
            TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);

    // (bucketCount - 1) is used as the file-index mask — presumably the bucket
    // count is a power of two; TODO(review): confirm that invariant upstream.
    long bucketCount =
        PropertyUtil.propertyAsLong(
            table.properties(),
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET,
            TableProperties.BASE_FILE_INDEX_HASH_BUCKET_DEFAULT);
    long mask = bucketCount - 1;

    CommonOutputFileFactory outputFileFactory =
        new CommonOutputFileFactory(
            table.location(),
            table.spec(),
            fileFormat,
            table.io(),
            table.asUnkeyedTable().encryption(),
            partitionId,
            taskId,
            transactionId);

    // Base writer handles plain inserts; the upsert writer layers deletes on top.
    SparkBaseTaskWriter baseWriter =
        new SparkBaseTaskWriter(
            fileFormat,
            appenderFactory,
            outputFileFactory,
            table.io(),
            targetFileSizeBytes,
            mask,
            schema,
            table.spec(),
            null,
            orderedWriter);

    return new UnkeyedUpsertSparkWriter<>(
        table, appenderFactory, outputFileFactory, fileFormat, schema, baseWriter);
  }
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



