# warn_no_gradient() — excerpted from ppo_ewma/torch_util.py

def warn_no_gradient(model, task):
    """Report every parameter of *model* that has no gradient.

    Debugging aid for use after a backward pass: any parameter whose
    ``.grad`` attribute is still ``None`` did not receive gradients for
    *task*, and a line naming it (with its shape) is printed to stdout.
    """
    for name, param in model.named_parameters():
        if param.grad is not None:
            continue  # parameter got a gradient; nothing to report
        print(f"parameter '{name}' {param.shape} has no gradient for '{task}'")