experiments/codes/experiment/checkpointable_multitask_experiment.py [153:192]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                # Per-epoch training loop for the current task: resumes from
                # self.epoch (checkpoint support) up to the configured epoch count.
                for epoch in range(self.epoch, self.config.model.num_epochs):
                    self.logbook.write_message_logs(f"Training rule {train_rule_world}")
                    self.logbook.write_message_logs(
                        f"Choosing to train the model " f"on {train_rule_world}"
                    )
                    # Train, optimize and test on the same world
                    train_data = self.dataloaders["train"][train_rule_world]
                    self.train(train_data, train_rule_world, epoch, task_idx=task_idx)
                    # Validate on the same world's data; metrics["loss"] drives the
                    # (plateau-style) LR schedulers below.
                    metrics = self.eval(
                        {train_rule_world: self.dataloaders["train"][train_rule_world]},
                        epoch=epoch,
                        mode="valid",
                        data_mode="train",
                        task_idx=task_idx,
                    )
                    for sched in self.schedulers:
                        # NOTE(review): step(loss) implies ReduceLROnPlateau-style
                        # schedulers — confirm scheduler type.
                        sched.step(metrics["loss"])

                # current task performance
                # NOTE(review): `epoch` here is the loop variable leaking out of the
                # for-loop above; if self.epoch >= num_epochs the loop never runs and
                # `epoch` is stale (or undefined on a fresh run) — verify resume path.
                self.eval(
                    {train_rule_world: self.dataloaders["train"][train_rule_world]},
                    epoch=epoch,
                    mode="test",
                    data_mode="train",
                )
                if task_idx > 0:
                    # previous tasks performance
                    # NOTE(review): the dict value indexes
                    # full_train_world_names[task_idx] — it ignores the comprehension
                    # variable `task`, so every previous task is evaluated against the
                    # CURRENT task's dataloader. Likely intended to index by `task`'s
                    # position; TODO confirm against the eval() contract before fixing.
                    self.eval(
                        {
                            task: self.dataloaders["train"][
                                full_train_world_names[task_idx]
                            ]
                            for task in train_world_names[:task_idx]
                        },
                        epoch=epoch,
                        mode="test",
                        data_mode="train_prev",
                    )
                # Persist a checkpoint keyed by the task index.
                self.periodic_save(task_idx)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



experiments/codes/experiment/checkpointable_multitask_experiment.py [224:262]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                # Per-epoch training loop for the current task: resumes from
                # self.epoch (checkpoint support) up to the configured epoch count.
                for epoch in range(self.epoch, self.config.model.num_epochs):
                    self.logbook.write_message_logs(f"Training rule {train_rule_world}")
                    self.logbook.write_message_logs(
                        f"Choosing to train the model " f"on {train_rule_world}"
                    )
                    # Train, optimize and test on the same world
                    train_data = self.dataloaders["train"][train_rule_world]
                    self.train(train_data, train_rule_world, epoch, task_idx=task_idx)
                    # Validate on the same world's data; metrics["loss"] drives the
                    # (plateau-style) LR schedulers below.
                    metrics = self.eval(
                        {train_rule_world: self.dataloaders["train"][train_rule_world]},
                        epoch=epoch,
                        mode="valid",
                        data_mode="train",
                        task_idx=task_idx,
                    )
                    for sched in self.schedulers:
                        # NOTE(review): step(loss) implies ReduceLROnPlateau-style
                        # schedulers — confirm scheduler type.
                        sched.step(metrics["loss"])
                # current task performance
                # NOTE(review): `epoch` here is the loop variable leaking out of the
                # for-loop above; if self.epoch >= num_epochs the loop never runs and
                # `epoch` is stale (or undefined on a fresh run) — verify resume path.
                self.eval(
                    {train_rule_world: self.dataloaders["train"][train_rule_world]},
                    epoch=epoch,
                    mode="test",
                    data_mode="train",
                )
                if task_idx > 0:
                    # previous tasks performance
                    # NOTE(review): the dict value indexes
                    # full_train_world_names[task_idx] — it ignores the comprehension
                    # variable `task`, so every previous task is evaluated against the
                    # CURRENT task's dataloader. Likely intended to index by `task`'s
                    # position; TODO confirm against the eval() contract before fixing.
                    self.eval(
                        {
                            task: self.dataloaders["train"][
                                full_train_world_names[task_idx]
                            ]
                            for task in train_world_names[:task_idx]
                        },
                        epoch=epoch,
                        mode="test",
                        data_mode="train_prev",
                    )
                # Persist a checkpoint keyed by the task index.
                self.periodic_save(task_idx)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



