# run() — defined in benchmark/supervised/train.py

def run(config):
    """Train one model per (dataset, loss) pair described by *config*.

    For every dataset entry, the train/test splits are loaded once and then a
    fresh model is built, compiled, and fitted for each loss configuration.
    Checkpoints are written under ``models/<version>/<dataset>_<loss>/`` and
    the Keras training history is saved alongside them as ``history.json``.

    Args:
        config: dict with a ``version`` string and a ``datasets`` mapping of
            per-dataset hyper-parameters: ``batch_size``, ``architecture``,
            ``epochs``, ``train_steps``, ``val_steps``, ``shape``,
            ``embedding_size``, ``trainable``, ``distance``, and a ``losses``
            list of loss configurations (each with ``name`` and ``lr``).
    """
    version = config['version']
    for dataset_name, dconf in config['datasets'].items():
        cprint("[%s]\n" % dataset_name, 'yellow')
        batch_size = dconf['batch_size']
        architecture = dconf['architecture']
        epochs = dconf['epochs']
        train_steps = dconf['train_steps']
        val_steps = dconf['val_steps']
        shape = dconf['shape']
        embedding_size = dconf['embedding_size']
        trainable = dconf['trainable']
        distance = dconf['distance']

        # Load the splits once per dataset; they are reused across losses.
        cprint("|-loading dataset", 'blue')
        x_train, y_train = load_dataset(version, dataset_name, 'train')
        x_test, y_test = load_dataset(version, dataset_name, 'test')
        print("shapes x:", x_train.shape, 'y:', y_train.shape)

        for lparams in dconf['losses']:
            cprint("Training %s" % lparams['name'], 'green')

            # Output directory for this run's checkpoints and history.
            stub = "models/%s/%s_%s/" % (version, dataset_name, lparams['name'])

            # Start from a clean slate so stale checkpoints from a previous
            # run cannot be mistaken for this run's results.
            clean_dir(stub)

            # Build a fresh loss/optimizer/model per run so that no state
            # leaks between loss configurations.
            loss = make_loss(distance, lparams)
            optim = Adam(lparams['lr'])
            callbacks = [ModelCheckpoint(stub)]

            model = EfficientNetSim(shape,
                                    embedding_size,
                                    variant=architecture,
                                    trainable=trainable)

            model.compile(optimizer=optim, loss=loss)
            history = model.fit(x_train,
                                y_train,
                                batch_size=batch_size,
                                steps_per_epoch=train_steps,
                                epochs=epochs,
                                validation_data=(x_test, y_test),
                                callbacks=callbacks,
                                validation_steps=val_steps)
            # Persist per-epoch metrics so results can be inspected without
            # re-running training; json.dump streams straight to the file.
            with open("%shistory.json" % stub, 'w') as o:
                json.dump(history.history, o)