# freeze_layers()
# From: torchmoji/finetuning.py

def freeze_layers(model, unfrozen_types=None, unfrozen_keyword=None):
    """ Freezes all layers in the given model, except for ones that are
        explicitly specified to not be frozen.

    # Arguments:
        model: Model whose layers should be modified.
        unfrozen_types: List of layer types which shouldn't be frozen.
            Defaults to freezing everything (empty list).
        unfrozen_keyword: Name keyword of layers that shouldn't be frozen
            (matched case-insensitively against child-module names).

    # Returns:
        Model with the selected layers frozen.
    """
    # Avoid a mutable default argument: a shared [] default would be the
    # same list object across all calls.
    if unfrozen_types is None:
        unfrozen_types = []
    # Only direct children that actually own parameters are worth touching;
    # any(...) short-circuits on the first parameter instead of building a
    # throwaway list as the original len([...]) check did.
    trainable_modules = [(n, m) for n, m in model.named_children()
                         if any(True for _ in m.parameters())]
    for name, module in trainable_modules:
        # Keep trainable when the module's repr mentions one of the allowed
        # type strings, or its name contains the keyword.
        trainable = (any(typ in str(module) for typ in unfrozen_types) or
                     (unfrozen_keyword is not None and unfrozen_keyword.lower() in name.lower()))
        change_trainable(module, trainable, verbose=False)
    return model