training/flax/run_long_form_transcription.py [111:144]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            )
        },
    )
    # NOTE(review): excerpt of a dataclass-style arguments class; the enclosing
    # class definition is outside this view — confirm field placement there.
    # Whether to emit timestamp tokens alongside text; per the help text,
    # timestamp predictions are discarded at the end of inference.
    return_timestamps: Optional[bool] = field(
        default=False,
        metadata={
            # Fixed grammar: "assist in the model in" -> "assist the model in".
            "help": "Whether to predict timestamps (alongside the text predictions). Timestamp predictions "
            "are discarded at the end of inference, but may assist the model in reducing hallucinations."
        },
    )
    # Beam-search length penalty: exponent applied to sequence length, which
    # divides the (log-likelihood) sequence score; >1.0 favours longer
    # sequences, <1.0 favours shorter ones (see help text).
    length_penalty: Optional[float] = field(
        default=1.0,
        metadata={
            "help": (
                "Exponential penalty to the length that is used with beam-based generation. It is applied as an "
                "exponent to the sequence length, which in turn is used to divide the score of the sequence. Since "
                "the score is the log likelihood of the sequence (i.e. negative), length_penalty > 1.0 promotes "
                "longer sequences, while length_penalty < 1.0 encourages shorter sequences."
            )
        },
    )
    # Sampling toggle; False (the default) means greedy decoding.
    do_sample: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether or not to use sampling ; use greedy decoding otherwise."},
    )
    # Number of highest-probability vocabulary tokens kept for top-k filtering.
    top_k: Optional[int] = field(
        default=50,
        metadata={"help": "The number of the highest probability vocabulary tokens to keep for top-k-filtering."},
    )
    # Softmax temperature modulating next-token probabilities when sampling.
    temperature: Optional[float] = field(
        default=1.0,
        metadata={"help": "The value used to modulate the next token probabilities if sampling."},
    )
    chunk_length_s: Optional[float] = field(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



training/flax/run_pt_long_form_transcription.py [99:132]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            )
        },
    )
    # NOTE(review): excerpt of a dataclass-style arguments class; the enclosing
    # class definition is outside this view — confirm field placement there.
    # Whether to emit timestamp tokens alongside text; per the help text,
    # timestamp predictions are discarded at the end of inference.
    return_timestamps: Optional[bool] = field(
        default=False,
        metadata={
            # Fixed grammar: "assist in the model in" -> "assist the model in".
            "help": "Whether to predict timestamps (alongside the text predictions). Timestamp predictions "
            "are discarded at the end of inference, but may assist the model in reducing hallucinations."
        },
    )
    # Beam-search length penalty: exponent applied to sequence length, which
    # divides the (log-likelihood) sequence score; >1.0 favours longer
    # sequences, <1.0 favours shorter ones (see help text).
    length_penalty: Optional[float] = field(
        default=1.0,
        metadata={
            "help": (
                "Exponential penalty to the length that is used with beam-based generation. It is applied as an "
                "exponent to the sequence length, which in turn is used to divide the score of the sequence. Since "
                "the score is the log likelihood of the sequence (i.e. negative), length_penalty > 1.0 promotes "
                "longer sequences, while length_penalty < 1.0 encourages shorter sequences."
            )
        },
    )
    # Sampling toggle; False (the default) means greedy decoding.
    do_sample: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether or not to use sampling ; use greedy decoding otherwise."},
    )
    # Number of highest-probability vocabulary tokens kept for top-k filtering.
    top_k: Optional[int] = field(
        default=50,
        metadata={"help": "The number of the highest probability vocabulary tokens to keep for top-k-filtering."},
    )
    # Softmax temperature modulating next-token probabilities when sampling.
    temperature: Optional[float] = field(
        default=1.0,
        metadata={"help": "The value used to modulate the next token probabilities if sampling."},
    )
    chunk_length_s: Optional[float] = field(
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



