in src/speech_reps/models/wav2vec2.py [0:0]
def __init__(self, fname):
    """Load a pretrained wav2vec 2.0 checkpoint and wrap it for inference.

    Args:
        fname: Path to a checkpoint file expected to contain an "args"
            entry (model build configuration) and a "model" entry
            (state dict) — presumably a fairseq-style checkpoint;
            verify against the training pipeline.
    """
    super().__init__()
    # NOTE(review): torch.load unpickles arbitrary Python objects —
    # only load checkpoint files from trusted sources.
    state = torch.load(fname, map_location="cpu")
    self.args = state["args"]
    net = Wav2Vec2Model.build_model(self.args, None)
    net.load_state_dict(state["model"])
    # Eval mode: disable dropout / put norm layers in inference mode,
    # since this wrapper is used for feature extraction, not training.
    net.eval()
    self.model = net