src/autotrain/trainers/clm/utils.py [654:760]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def configure_logging_steps(config, train_data, valid_data):
    """
    Configures the logging steps for training based on the provided configuration and data.

    Parameters:
    config (object): Configuration object containing training parameters, including `logging_steps`, `valid_split`, and `batch_size`.
    train_data (iterable): Training dataset.
    valid_data (iterable): Validation dataset.

    Returns:
    int: The number of logging steps to be used during training.

    Notes:
    - If `config.logging_steps` is set to -1, the function derives logging steps as 20% of the number of batches (dataset length divided by `config.batch_size`), using the validation data when `valid_split` is provided and the training data otherwise.
    - The derived value is clamped to the range [1, 25] and written back to `config.logging_steps`.
    - If `config.logging_steps` is not -1, the provided value is used unchanged.
    """
    logger.info("configuring logging steps")
    if config.logging_steps == -1:
        if config.valid_split is not None:
            logging_steps = int(0.2 * len(valid_data) / config.batch_size)
        else:
            logging_steps = int(0.2 * len(train_data) / config.batch_size)
        if logging_steps == 0:
            logging_steps = 1
        if logging_steps > 25:
            logging_steps = 25
        config.logging_steps = logging_steps
    else:
        logging_steps = config.logging_steps
    logger.info(f"Logging steps: {logging_steps}")
    return logging_steps


def configure_training_args(config, logging_steps):
    """
    Configures the training arguments for a language model based on the provided configuration.

    Args:
        config (object): Configuration object containing various training parameters.
        logging_steps (int): Number of steps between logging events.

    Returns:
        dict: A dictionary containing the configured training arguments.

    The configuration object `config` should have the following attributes:
        - project_name (str): The name of the project, used as the output directory.
        - batch_size (int): Batch size for both training and evaluation.
        - lr (float): Learning rate.
        - epochs (int): Number of training epochs.
        - eval_strategy (str): Evaluation strategy, e.g., "steps" or "epoch".
        - valid_split (float or None): Validation split ratio. If None, evaluation is disabled.
        - save_total_limit (int): Maximum number of checkpoints to save.
        - gradient_accumulation (int): Number of gradient accumulation steps.
        - log (str): Logging destination, e.g., "tensorboard".
        - auto_find_batch_size (bool): Whether to automatically find the optimal batch size.
        - scheduler (str): Learning rate scheduler type.
        - optimizer (str): Optimizer type.
        - warmup_ratio (float): Warmup ratio for learning rate scheduling.
        - weight_decay (float): Weight decay for the optimizer.
        - max_grad_norm (float): Maximum gradient norm for clipping.
        - disable_gradient_checkpointing (bool): Whether to disable gradient checkpointing.
        - peft (bool): Whether to use Parameter-Efficient Fine-Tuning (PEFT).
        - quantization (str): Quantization type, e.g., "int4" or "int8".
        - mixed_precision (str): Mixed precision type, e.g., "fp16" or "bf16".

    The function also sets `gradient_checkpointing_kwargs` when gradient checkpointing is enabled
    (reentrant checkpointing only when PEFT is combined with int4/int8 quantization) and enables
    fp16 or bf16 mixed precision according to `config.mixed_precision`.
    """
    logger.info("configuring training args")
    training_args = dict(
        output_dir=config.project_name,
        per_device_train_batch_size=config.batch_size,
        per_device_eval_batch_size=config.batch_size,
        learning_rate=config.lr,
        num_train_epochs=config.epochs,
        eval_strategy=config.eval_strategy if config.valid_split is not None else "no",
        logging_steps=logging_steps,
        save_total_limit=config.save_total_limit,
        save_strategy=config.eval_strategy if config.valid_split is not None else "no",
        gradient_accumulation_steps=config.gradient_accumulation,
        report_to=config.log,
        auto_find_batch_size=config.auto_find_batch_size,
        lr_scheduler_type=config.scheduler,
        optim=config.optimizer,
        warmup_ratio=config.warmup_ratio,
        weight_decay=config.weight_decay,
        max_grad_norm=config.max_grad_norm,
        push_to_hub=False,
        load_best_model_at_end=True if config.valid_split is not None else False,
        ddp_find_unused_parameters=False,
        gradient_checkpointing=not config.disable_gradient_checkpointing,
        remove_unused_columns=False,
    )

    if not config.disable_gradient_checkpointing:
        if config.peft and config.quantization in ("int4", "int8"):
            training_args["gradient_checkpointing_kwargs"] = {"use_reentrant": True}
        else:
            training_args["gradient_checkpointing_kwargs"] = {"use_reentrant": False}

    if config.mixed_precision == "fp16":
        training_args["fp16"] = True
    if config.mixed_precision == "bf16":
        training_args["bf16"] = True

    return training_args
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
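
For a concrete sense of the clamping behaviour in `configure_logging_steps`, here is a minimal sketch (assuming `autotrain` is installed so the helper imports from the path shown above; the `SimpleNamespace` config and list datasets are stand-ins, and only `len()` is called on the datasets):

from types import SimpleNamespace

from autotrain.trainers.clm.utils import configure_logging_steps

# logging_steps=-1 asks the helper to derive a value itself.
config = SimpleNamespace(logging_steps=-1, valid_split=0.2, batch_size=8)
train_data = list(range(20_000))  # dummy datasets; only their length is read
valid_data = list(range(5_000))

steps = configure_logging_steps(config, train_data, valid_data)
# int(0.2 * 5000 / 8) = 125, clamped to the upper bound of 25
# and written back to config.logging_steps.
assert steps == 25 and config.logging_steps == 25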


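The dictionary returned by `configure_training_args` unpacks directly into Hugging Face `TrainingArguments`. A minimal sketch of one way to consume it, assuming a recent `transformers` release (one that accepts `eval_strategy` and `gradient_checkpointing_kwargs`) with its torch/accelerate dependencies installed; the `SimpleNamespace` below is a hypothetical stand-in for autotrain's real config object:

from types import SimpleNamespace

from transformers import TrainingArguments

from autotrain.trainers.clm.utils import configure_training_args

# Hypothetical config carrying every attribute the helper reads.
config = SimpleNamespace(
    project_name="my-clm-project",
    batch_size=4,
    lr=2e-4,
    epochs=3,
    eval_strategy="epoch",
    valid_split=0.2,
    save_total_limit=1,
    gradient_accumulation=4,
    log="none",
    auto_find_batch_size=False,
    scheduler="linear",
    optimizer="adamw_torch",
    warmup_ratio=0.1,
    weight_decay=0.01,
    max_grad_norm=1.0,
    disable_gradient_checkpointing=False,
    peft=True,
    quantization="int4",
    mixed_precision=None,  # set to "fp16"/"bf16" on suitable hardware
)

training_args = configure_training_args(config, logging_steps=25)
# PEFT combined with int4 quantization selects reentrant checkpointing.
assert training_args["gradient_checkpointing_kwargs"] == {"use_reentrant": True}

# The keys map one-to-one onto TrainingArguments fields.
args = TrainingArguments(**training_args)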

src/autotrain/trainers/vlm/utils.py [127:183]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def configure_logging_steps(config, train_data, valid_data):
    """Configure the number of logging steps; identical to the CLM helper above."""
    logger.info("configuring logging steps")
    if config.logging_steps == -1:
        if config.valid_split is not None:
            logging_steps = int(0.2 * len(valid_data) / config.batch_size)
        else:
            logging_steps = int(0.2 * len(train_data) / config.batch_size)
        if logging_steps == 0:
            logging_steps = 1
        if logging_steps > 25:
            logging_steps = 25
        config.logging_steps = logging_steps
    else:
        logging_steps = config.logging_steps
    logger.info(f"Logging steps: {logging_steps}")
    return logging_steps


def configure_training_args(config, logging_steps):
    """Assemble the training-arguments dict from the config; identical to the CLM helper above."""
    logger.info("configuring training args")
    training_args = dict(
        output_dir=config.project_name,
        per_device_train_batch_size=config.batch_size,
        per_device_eval_batch_size=config.batch_size,
        learning_rate=config.lr,
        num_train_epochs=config.epochs,
        eval_strategy=config.eval_strategy if config.valid_split is not None else "no",
        logging_steps=logging_steps,
        save_total_limit=config.save_total_limit,
        save_strategy=config.eval_strategy if config.valid_split is not None else "no",
        gradient_accumulation_steps=config.gradient_accumulation,
        report_to=config.log,
        auto_find_batch_size=config.auto_find_batch_size,
        lr_scheduler_type=config.scheduler,
        optim=config.optimizer,
        warmup_ratio=config.warmup_ratio,
        weight_decay=config.weight_decay,
        max_grad_norm=config.max_grad_norm,
        push_to_hub=False,
        load_best_model_at_end=True if config.valid_split is not None else False,
        ddp_find_unused_parameters=False,
        gradient_checkpointing=not config.disable_gradient_checkpointing,
        remove_unused_columns=False,
    )

    if not config.disable_gradient_checkpointing:
        if config.peft and config.quantization in ("int4", "int8"):
            training_args["gradient_checkpointing_kwargs"] = {"use_reentrant": True}
        else:
            training_args["gradient_checkpointing_kwargs"] = {"use_reentrant": False}

    if config.mixed_precision == "fp16":
        training_args["fp16"] = True
    if config.mixed_precision == "bf16":
        training_args["bf16"] = True

    return training_args
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
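
These VLM helpers are line-for-line the same as the CLM versions above, so the sketches shown there apply unchanged. The remaining gradient-checkpointing branches are worth making explicit: non-reentrant checkpointing is requested when PEFT is used without int4/int8 quantization, and no kwargs are attached at all when checkpointing is disabled. A minimal illustration (same assumptions as above; `base` is a hypothetical set of filler values):

from types import SimpleNamespace

from autotrain.trainers.vlm.utils import configure_training_args

# Hypothetical filler settings; only the checkpointing-related fields vary below.
base = dict(
    project_name="my-vlm-project", batch_size=1, lr=2e-5, epochs=1,
    eval_strategy="epoch", valid_split=None, save_total_limit=1,
    gradient_accumulation=8, log="none", auto_find_batch_size=False,
    scheduler="linear", optimizer="adamw_torch", warmup_ratio=0.1,
    weight_decay=0.0, max_grad_norm=1.0, mixed_precision=None,
)

# PEFT without quantization: non-reentrant checkpointing is requested.
cfg = SimpleNamespace(**base, disable_gradient_checkpointing=False, peft=True, quantization=None)
args = configure_training_args(cfg, logging_steps=10)
assert args["gradient_checkpointing_kwargs"] == {"use_reentrant": False}

# Checkpointing disabled: no checkpointing kwargs are attached at all.
cfg = SimpleNamespace(**base, disable_gradient_checkpointing=True, peft=True, quantization="int4")
args = configure_training_args(cfg, logging_steps=10)
assert args["gradient_checkpointing"] is False and "gradient_checkpointing_kwargs" not in args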



