def rmac()

in eval_retrieval.py


def rmac(features, rmac_levels, pca=None):
    # features: (n_images, n_channels, H, W) convolutional activations
    nim, nc, xd, yd = features.size()

    # Multi-scale R-MAC region grid, one (x0, y0, w, h) box per region
    rmac_regions = image_helper.get_rmac_region_coordinates(xd, yd, rmac_levels)
    rmac_regions = rmac_regions.astype(int)  # np.int was removed in NumPy 1.24
    nr = len(rmac_regions)

    rmac_descriptors = []
    for x0, y0, w, h in rmac_regions:
        # Max-pool the activations over each region (per-region MAC descriptor)
        desc = features[:, :, y0:y0 + h, x0:x0 + w]
        desc = torch.max(desc, 2, keepdim=True)[0]
        desc = torch.max(desc, 3, keepdim=True)[0]
        # reshape to (n_images, 1, n_channels) so the regions can be concatenated
        rmac_descriptors.append(desc.view(-1, 1, nc))

    # Stack regions: (n_images, n_regions, n_channels)
    rmac_descriptors = torch.cat(rmac_descriptors, 1)

    # L2-normalize each region descriptor along the channel dimension
    rmac_descriptors = normalize_L2(rmac_descriptors, 2)

    if pca is None:
        return rmac_descriptors

    # PCA + whitening
    npca = pca.n_components
    rmac_descriptors = pca.apply(rmac_descriptors.view(nr * nim, nc))
    rmac_descriptors = normalize_L2(rmac_descriptors, 1)

    rmac_descriptors = rmac_descriptors.view(nim, nr, npca)

    # Sum aggregation and L2-normalization
    rmac_descriptors = torch.sum(rmac_descriptors, 1)
    rmac_descriptors = normalize_L2(rmac_descriptors, 1)
    return rmac_descriptors
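
Below is a minimal usage sketch, not taken from the repository: it assumes eval_retrieval.py (together with its image_helper and normalize_L2 helpers) is importable, and the feature shape and rmac_levels=3 are illustrative values only.

# Usage sketch (assumptions: eval_retrieval.py and its helpers are on the
# Python path; feature shape and rmac_levels below are illustrative).
import torch
from eval_retrieval import rmac

# Dummy conv activations for 4 images: (n_images, n_channels, H, W)
features = torch.randn(4, 512, 30, 40)

# Without PCA: L2-normalized per-region descriptors,
# shape (n_images, n_regions, n_channels)
region_descs = rmac(features, rmac_levels=3)

# With a fitted PCA/whitening object exposing .n_components and .apply()
# (as used above), the regions are whitened, sum-aggregated and re-normalized
# into a single (n_images, n_components) descriptor per image:
# image_descs = rmac(features, rmac_levels=3, pca=pca)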