aiops/ContraLSP/rare/rare_feature.py [178:238]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        output = (output ** 2).sum(dim=-1)
        return output.unsqueeze(-1)

    # All four baseline explainers follow the exact same recipe:
    #   1. attribute each individual time series with the explainer,
    #   2. pickle the resulting saliency tensor into save_dir,
    #   3. print the metrics against the true saliency.
    # Drive them from one table instead of four copy-pasted branches.
    baselines = [
        ("fo", FO),     # Feature Occlusion
        ("fp", FP),     # Feature Permutation
        ("ig", IG),     # Integrated Gradients
        ("shap", SVS),  # Sampling Shapley Values
    ]
    for name, explainer_cls in baselines:
        if name not in explainers:
            continue
        saliency = torch.zeros(size=true_saliency.shape, device=device)
        # We compute the attribution for each individual time series.
        for k in range(N_ex):
            x = X[k, :, :]
            explainer = explainer_cls(f=f)
            saliency[k, :, :] = explainer.attribute(x)
        # Save everything in the directory.
        with open(os.path.join(save_dir, f"{name}_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(saliency, file)
        print(f"=============={name}==============")
        print_results(saliency, true_saliency)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Index appended to the saved saliency filenames (e.g. "fo_saliency_{cv}.pkl");
    # presumably the cross-validation fold / seed index — confirm with callers.
    parser.add_argument("--cv", default=0, type=int)
    # NOTE(review): type=bool is an argparse pitfall — bool("False") is True, so
    # any non-empty value on the command line parses as True. Only the default
    # is reliable; consider a str-to-bool converter or store_true/store_false.
    parser.add_argument("--print_result", default=True, type=bool)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



aiops/ContraLSP/rare/rare_time.py [184:244]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        output = (output ** 2).sum(dim=-1)
        return output.unsqueeze(-1)

    # All four baseline explainers follow the exact same recipe:
    #   1. attribute each individual time series with the explainer,
    #   2. pickle the resulting saliency tensor into save_dir,
    #   3. print the metrics against the true saliency.
    # Drive them from one table instead of four copy-pasted branches.
    baselines = [
        ("fo", FO),     # Feature Occlusion
        ("fp", FP),     # Feature Permutation
        ("ig", IG),     # Integrated Gradients
        ("shap", SVS),  # Sampling Shapley Values
    ]
    for name, explainer_cls in baselines:
        if name not in explainers:
            continue
        saliency = torch.zeros(size=true_saliency.shape, device=device)
        # We compute the attribution for each individual time series.
        for k in range(N_ex):
            x = X[k, :, :]
            explainer = explainer_cls(f=f)
            saliency[k, :, :] = explainer.attribute(x)
        # Save everything in the directory.
        with open(os.path.join(save_dir, f"{name}_saliency_{cv}.pkl"), "wb") as file:
            pkl.dump(saliency, file)
        print(f"=============={name}==============")
        print_results(saliency, true_saliency)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Index appended to the saved saliency filenames (e.g. "fo_saliency_{cv}.pkl");
    # presumably the cross-validation fold / seed index — confirm with callers.
    parser.add_argument("--cv", default=0, type=int)
    # NOTE(review): type=bool is an argparse pitfall — bool("False") is True, so
    # any non-empty value on the command line parses as True. Only the default
    # is reliable; consider a str-to-bool converter or store_true/store_false.
    parser.add_argument("--print_result", default=True, type=bool)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



