# run()
#
# From: optimum/commands/neuron/cache.py


    def run(self):
        """Execute a training example run so the compiled model is added to the cache.

        Resolves the sequence-length configuration from the CLI arguments
        (either a single ``sequence_length`` or an encoder/decoder pair),
        then launches the example runner and raises ``ValueError`` with the
        captured log if the run exits with a non-zero status.
        """
        args = self.args
        runner = ExampleRunner(args.model, args.task, example_dir=args.example_dir)

        # Fall back to the training batch size when no eval batch size was given.
        if args.eval_batch_size is None:
            args.eval_batch_size = args.train_batch_size

        # A single sequence_length takes precedence; otherwise both the
        # encoder and decoder lengths must be supplied together.
        encoder_length = args.encoder_sequence_length
        decoder_length = args.decoder_sequence_length
        if args.sequence_length is not None:
            sequence_length = args.sequence_length
        else:
            if encoder_length is None and decoder_length is None:
                raise ValueError(
                    "You need to specify either sequence_length or encoder_sequence_length and decoder_sequence_length"
                )
            if encoder_length is None or decoder_length is None:
                raise ValueError("Both the encoder_sequence_length and the decoder_sequence_length must be provided.")
            sequence_length = [encoder_length, decoder_length]

        returncode, stdout = runner.run(
            args.num_cores,
            args.precision,
            args.train_batch_size,
            sequence_length,
            do_eval=True,
            eval_batch_size=args.eval_batch_size,
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            num_epochs=3,
            max_steps=args.max_steps,
            # Save once halfway through so a checkpoint exists for caching.
            save_steps=args.max_steps // 2,
        )
        if returncode != 0:
            raise ValueError(f"Could not add the model to the cache. Full log:\n{stdout}.")