scripts/setfit/run_fewshot.py [154:177]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # Train: the pytorch head needs a two-phase schedule; every other
            # classifier type trains in a single pass.
            if args.classifier != "pytorch":
                trainer.train()
            else:
                # Phase 1: fit the embedding body with the head frozen.
                trainer.freeze()
                trainer.train()
                # Phase 2: unfreeze (optionally keeping the body frozen) and
                # fine-tune the head end-to-end.
                trainer.unfreeze(keep_body_frozen=args.keep_body_frozen)
                trainer.train(
                    num_epochs=25,
                    body_learning_rate=1e-5,
                    learning_rate=args.lr,  # recommend: 1e-2
                    l2_weight=0.0,
                    batch_size=args.batch_size,
                )

            # Score the fitted model on the held-out test split.
            metrics = trainer.evaluate()
            print(f"Metrics: {metrics}")

            # Persist the chosen metric (scaled to a percentage) as JSON.
            result = {"score": metrics[metric] * 100, "measure": metric}
            with open(results_path, "w") as f_out:
                json.dump(result, f_out, sort_keys=True)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



scripts/setfit/run_zeroshot.py [149:172]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Train: the pytorch head needs a two-phase schedule; every other
    # classifier type trains in a single pass.
    if args.classifier != "pytorch":
        trainer.train()
    else:
        # Phase 1: fit the embedding body with the head frozen.
        trainer.freeze()
        trainer.train()
        # Phase 2: unfreeze (optionally keeping the body frozen) and
        # fine-tune the head end-to-end.
        trainer.unfreeze(keep_body_frozen=args.keep_body_frozen)
        trainer.train(
            num_epochs=25,
            body_learning_rate=1e-5,
            learning_rate=args.lr,  # recommend: 1e-2
            l2_weight=0.0,
            batch_size=args.batch_size,
        )

    # Score the fitted model on the held-out test split.
    metrics = trainer.evaluate()
    print(f"Metrics: {metrics}")

    # Persist the chosen metric (scaled to a percentage) as JSON.
    result = {"score": metrics[metric] * 100, "measure": metric}
    with open(results_path, "w") as f_out:
        json.dump(result, f_out, sort_keys=True)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



