in quant/binary/weight_quantization.py [0:0]
def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
    """Quantize `x` with the greedy foldable `k`-bit quantizer.

    In training mode the per-bit scale values are recomputed from `x` and
    snapshotted into the `v1..vk` buffers; in eval mode those stored
    buffers are reused so quantization stays fixed.
    """
    if not self.training:
        # Eval: reuse the scale values captured during training.
        stored_vs = [getattr(self, f'v{i+1}') for i in range(self.k)]
        _, x_q = quantization.quantizer_gf(x, k=self.k, vs=stored_vs)
        return x_q
    # Training: derive fresh scales from the current input...
    new_vs, x_q = quantization.quantizer_gf(x, k=self.k)
    # ...and persist them in-place into the v1..vk buffers for eval.
    for idx, v in enumerate(new_vs, start=1):
        getattr(self, f'v{idx}').copy_(v)
    return x_q