in trl/trainer/kto_trainer.py [0:0]
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[list[str], list[str]]:
"""Generate samples from the model and reference model for the given batch of inputs."""
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
# the torch amp context manager, as some hidden states are silently cast to full precision.
generate_context_manager = (
autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext()
)
with generate_context_manager:
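# Sample completions from the policy model for every prompt in the batch,
# capped at max_length tokens.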
policy_output = model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
# If the batch already provides a reference_output, use it; otherwise generate one from the reference model
if "reference_output" in batch:
reference_output = batch["reference_output"]
else:
if self.ref_model is None:
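# No separate reference model was provided: sample from `self.model` inside
# `null_ref_context`, which temporarily disables the PEFT adapter so the base
# model acts as the reference policy.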
with self.null_ref_context():
reference_output = self.model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
else:
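# A dedicated reference model was provided; sample from it directly.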
reference_output = self.ref_model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
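# Pad both sets of generated ids to max_length so they can be decoded and
# compared side by side.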
policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)
return policy_output_decoded, reference_output_decoded
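# A minimal usage sketch, assuming a KTOTrainer instance `trainer` and an
# `eval_batch` produced by its data collator (both names are illustrative,
# not part of this file); the batch is expected to carry the same
# "prompt_input_ids" / "prompt_attention_mask" tensors used above:
#
#   policy_samples, ref_samples = trainer.generate_from_model_and_ref(trainer.model, eval_batch)
#   for policy_text, ref_text in zip(policy_samples, ref_samples):
#       print("policy   :", policy_text)
#       print("reference:", ref_text)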