aiops/ContraLSP/rare/rare_time.py [80:176]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if "gatemask" in explainers:
        trainer = Trainer(
            max_epochs=200,
            accelerator="cpu",
            log_every_n_steps=2,
            logger=TensorBoardLogger(
                save_dir=".",
                version=random.getrandbits(128),
            ),
        )
        mask = GateMaskNet(
            forward_func=f,
            model=nn.Sequential(
                RNN(
                    input_size=X.shape[-1],
                    rnn="gru",
                    hidden_size=X.shape[-1],
                    bidirectional=True,
                ),
                MLP([2 * X.shape[-1], X.shape[-1]]),
            ),
            lambda_1=0.1,   # a value of 0.1 works well for both regularization weights
            lambda_2=0.1,
            optim="adam",
            lr=0.1,
        )
        explainer = GateMask(f)
        _attr = explainer.attribute(
            X,
            trainer=trainer,
            mask_net=mask,
            batch_size=N_ex,
            sigma=0.5,
        )
        gatemask_saliency = _attr.clone().detach()
        with open(os.path.join(save_dir, f"gatemask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(gatemask_saliency, file)
        print("==============gatemask==============")
        print_results(gatemask_saliency, true_saliency)

    if "nnmask" in explainers:
        trainer = Trainer(
            max_epochs=200,
            accelerator="cpu",
            log_every_n_steps=2,
            logger=TensorBoardLogger(
                save_dir=".",
                version=random.getrandbits(128),
            ),
        )
        mask = ExtremalMaskNet(
            forward_func=f,
            model=nn.Sequential(
                RNN(
                    input_size=X.shape[-1],
                    rnn="gru",
                    hidden_size=X.shape[-1],
                    bidirectional=True,
                ),
                MLP([2 * X.shape[-1], X.shape[-1]]),
            ),
            optim="adam",
            lr=0.1,
        )
        explainer = ExtremalMask(f)
        _attr = explainer.attribute(
            X,
            trainer=trainer,
            mask_net=mask,
            batch_size=N_ex,
        )
        nnmask_saliency = _attr.clone().detach().numpy()
        with open(os.path.join(save_dir, f"nnmask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(nnmask_saliency, file)
        print("==============nnmask==============")
        print_results(nnmask_saliency, true_saliency)

    if "dynamask" in explainers:
        pert = GaussianBlur(device=device)  # We use a Gaussian Blur perturbation operator
        mask_group = MaskGroup(perturbation=pert, device=device, random_seed=random_seed)
        mask_group.fit_multiple(
            f=f,
            X=X,
            area_list=np.arange(0.011, 0.041, 0.002),
            loss_function_multiple=mse_multiple,
            n_epoch=200,
            size_reg_factor_dilation=1000,
            size_reg_factor_init=0.01,
            learning_rate=0.1,
        )
        thresh = 0.01 * torch.ones(N_ex)
        mask = mask_group.get_extremal_mask_multiple(thresh)  # The mask with the lowest error is selected
        dynamask_saliency = mask.clone().detach().numpy()
        with open(os.path.join(save_dir, f"dynamask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(dynamask_saliency, file)
        print("==============dynamask==============")
        print_results(dynamask_saliency, true_saliency)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



aiops/ContraLSP/rare/rare_time_diffgroup.py [85:181]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if "gatemask" in explainers:
        trainer = Trainer(
            max_epochs=200,
            accelerator="cpu",
            log_every_n_steps=2,
            logger=TensorBoardLogger(
                save_dir=".",
                version=random.getrandbits(128),
            ),
        )
        mask = GateMaskNet(
            forward_func=f,
            model=nn.Sequential(
                RNN(
                    input_size=X.shape[-1],
                    rnn="gru",
                    hidden_size=X.shape[-1],
                    bidirectional=True,
                ),
                MLP([2 * X.shape[-1], X.shape[-1]]),
            ),
            lambda_1=0.1,   # a value of 0.1 works well for both regularization weights
            lambda_2=0.1,
            optim="adam",
            lr=0.1,
        )
        explainer = GateMask(f)
        _attr = explainer.attribute(
            X,
            trainer=trainer,
            mask_net=mask,
            batch_size=N_ex,
            sigma=0.5,
        )
        gatemask_saliency = _attr.clone().detach()
        with open(os.path.join(save_dir, f"gatemask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(gatemask_saliency, file)
        print("==============gatemask==============")
        print_results(gatemask_saliency, true_saliency)

    if "nnmask" in explainers:
        trainer = Trainer(
            max_epochs=200,
            accelerator="cpu",
            log_every_n_steps=2,
            logger=TensorBoardLogger(
                save_dir=".",
                version=random.getrandbits(128),
            ),
        )
        mask = ExtremalMaskNet(
            forward_func=f,
            model=nn.Sequential(
                RNN(
                    input_size=X.shape[-1],
                    rnn="gru",
                    hidden_size=X.shape[-1],
                    bidirectional=True,
                ),
                MLP([2 * X.shape[-1], X.shape[-1]]),
            ),
            optim="adam",
            lr=0.1,
        )
        explainer = ExtremalMask(f)
        _attr = explainer.attribute(
            X,
            trainer=trainer,
            mask_net=mask,
            batch_size=N_ex,
        )
        nnmask_saliency = _attr.clone().detach().numpy()
        with open(os.path.join(save_dir, f"nnmask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(nnmask_saliency, file)
        print("==============nnmask==============")
        print_results(nnmask_saliency, true_saliency)

    if "dynamask" in explainers:
        pert = GaussianBlur(device=device)  # We use a Gaussian Blur perturbation operator
        mask_group = MaskGroup(perturbation=pert, device=device, random_seed=random_seed)
        mask_group.fit_multiple(
            f=f,
            X=X,
            area_list=np.arange(0.011, 0.041, 0.002),
            loss_function_multiple=mse_multiple,
            n_epoch=200,
            size_reg_factor_dilation=1000,
            size_reg_factor_init=0.01,
            learning_rate=0.1,
        )
        thresh = 0.01 * torch.ones(N_ex)
        mask = mask_group.get_extremal_mask_multiple(thresh)  # The mask with the lowest error is selected
        dynamask_saliency = mask.clone().detach().numpy()
        with open(os.path.join(save_dir, f"dynamask_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(dynamask_saliency, file)
        print("==============dynamask==============")
        print_results(dynamask_saliency, true_saliency)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
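
Both excerpts end every explainer block with the same save-and-report tail (pickle the saliency, print a banner, call print_results). Below is a minimal sketch of a helper the two scripts could share instead; the helper name save_and_report and its placement are assumptions for illustration, not part of the repo:

import os
import pickle as pkl


def save_and_report(name, saliency, save_dir, cv, true_saliency, print_results):
    """Pickle one explainer's saliency and print its metrics.

    Hypothetical helper: factors out the tail currently duplicated for
    gatemask, nnmask, and dynamask in rare_time.py and rare_time_diffgroup.py.
    """
    with open(os.path.join(save_dir, f"{name}_saliency_{cv}.pkl"), "wb") as file:
        pkl.dump(saliency, file)
    print(f"=============={name}==============")
    print_results(saliency, true_saliency)

Each block would then reduce to a single call such as
save_and_report("gatemask", _attr.clone().detach(), save_dir, cv, true_saliency, print_results).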



