def to_parquet()

in awswrangler/s3/_write_parquet.py


# Imports required by the annotations below; the awswrangler-specific types
# are assumed to come from awswrangler.typing, as in the module's header.
from typing import Any, Literal

import boto3
import pandas as pd

from awswrangler import typing
from awswrangler.typing import (
    ArrowEncryptionConfiguration,
    BucketingInfoTuple,
    GlueTableSettings,
    _S3WriteDataReturnValue,
)


def to_parquet(
    df: pd.DataFrame,
    path: str | None = None,
    index: bool = False,
    compression: str | None = "snappy",  # codec: "snappy", "gzip", "zstd", or None
    pyarrow_additional_kwargs: dict[str, Any] | None = None,
    max_rows_by_file: int | None = None,  # split output into multiple files every N rows (None = no split)
    use_threads: bool | int = True,
    boto3_session: boto3.Session | None = None,
    s3_additional_kwargs: dict[str, Any] | None = None,
    sanitize_columns: bool = False,  # normalize column names for Athena/Glue; forced on when writing to the catalog
    dataset: bool = False,  # enable dataset mode (partitioning, write modes, and catalog arguments below)
    filename_prefix: str | None = None,
    partition_cols: list[str] | None = None,
    bucketing_info: BucketingInfoTuple | None = None,
    concurrent_partitioning: bool = False,  # write partitions concurrently (faster, higher memory use)
    mode: Literal["append", "overwrite", "overwrite_partitions"] | None = None,  # only applies when dataset=True
    catalog_versioning: bool = False,
    schema_evolution: bool = True,  # allow new columns when appending to an existing catalog table
    database: str | None = None,
    table: str | None = None,
    glue_table_settings: GlueTableSettings | None = None,
    dtype: dict[str, str] | None = None,
    athena_partition_projection_settings: typing.AthenaPartitionProjectionSettings | None = None,
    catalog_id: str | None = None,
    encryption_configuration: ArrowEncryptionConfiguration | None = None,
) -> _S3WriteDataReturnValue:
    ...
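
A minimal usage sketch of this API (the bucket, database, and table names are
placeholders, not part of the source): first a plain single-object write, then a
dataset write that partitions by a column and registers the table in the Glue
Data Catalog.

import awswrangler as wr
import pandas as pd

df = pd.DataFrame({
    "id": [1, 2, 3],
    "value": ["a", "b", "c"],
    "year": [2023, 2023, 2024],
})

# Simple write: one Parquet object at an explicit key.
wr.s3.to_parquet(df=df, path="s3://my-bucket/staging/file.parquet")

# Dataset write: hive-style partitioning plus Glue catalog registration.
# The database ("my_db") must already exist in the Glue Data Catalog.
result = wr.s3.to_parquet(
    df=df,
    path="s3://my-bucket/dataset/",
    dataset=True,
    partition_cols=["year"],
    mode="overwrite_partitions",
    database="my_db",
    table="my_table",
)
print(result["paths"])  # S3 URIs of the objects written

The returned dictionary also carries "partitions_values", mapping each written
partition prefix to the column values that produced it.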