in trainers/catex.py [0:0]
def forward(self, perturb='none', ood_prompt=False, ood_prompt_idx=None):
    """Assemble per-class prompt embeddings from the learnable context vectors.

    If `ood_prompt` is True, the learned OOD context is used (optionally selected
    by `ood_prompt_idx`); otherwise the in-distribution context is returned by
    `perturb_prompt`, optionally perturbed.
    """
    if ood_prompt:
        assert perturb == 'none', perturb
        if ood_prompt_idx is None:
            # Without an index, a single shared OOD prompt is expected.
            assert self.cfg.TRAINER.OOD_PROMPT_NUM == 1
            ctx = self.ctx_ood
        else:
            ctx = self.ctx_ood[ood_prompt_idx]
    else:
        ctx = self.perturb_prompt(perturb)

    if ctx.dim() == 2:
        # Unified context: broadcast the same context vectors to every class.
        ctx = ctx.unsqueeze(0).expand(self.n_cls, -1, -1)

    if self.ctx_cm is not None:
        # Prepend the shared (class-common) context to the class-specific context.
        ctx_cm = self.ctx_cm.expand(self.n_cls, -1, -1)
        ctx = torch.cat((ctx_cm, ctx), dim=1)

    prefix = self.token_prefix  # SOS token embeddings
    suffix = self.token_suffix  # class-name + EOS token embeddings

    if self.class_token_position == "end":
        prompts = torch.cat(
            [
                prefix,  # (n_cls, 1, dim)
                ctx,     # (n_cls, n_ctx, dim)
                suffix,  # (n_cls, *, dim)
            ],
            dim=1,
        )
    elif self.class_token_position == "middle":
        half_n_ctx = self.n_ctx // 2
        prompts = []
        for i in range(self.n_cls):
            name_len = self.name_lens[i]
            prefix_i = prefix[i : i + 1, :, :]
            class_i = suffix[i : i + 1, :name_len, :]
            suffix_i = suffix[i : i + 1, name_len:, :]
            ctx_i_half1 = ctx[i : i + 1, :half_n_ctx, :]
            ctx_i_half2 = ctx[i : i + 1, half_n_ctx:, :]
            prompt = torch.cat(
                [
                    prefix_i,     # (1, 1, dim)
                    ctx_i_half1,  # (1, n_ctx//2, dim)
                    class_i,      # (1, name_len, dim)
                    ctx_i_half2,  # (1, n_ctx//2, dim)
                    suffix_i,     # (1, *, dim)
                ],
                dim=1,
            )
            prompts.append(prompt)
        prompts = torch.cat(prompts, dim=0)
    elif self.class_token_position == "front":
        prompts = []
        for i in range(self.n_cls):
            name_len = self.name_lens[i]
            prefix_i = prefix[i : i + 1, :, :]
            class_i = suffix[i : i + 1, :name_len, :]
            suffix_i = suffix[i : i + 1, name_len:, :]
            ctx_i = ctx[i : i + 1, :, :]
            prompt = torch.cat(
                [
                    prefix_i,  # (1, 1, dim)
                    class_i,   # (1, name_len, dim)
                    ctx_i,     # (1, n_ctx, dim)
                    suffix_i,  # (1, *, dim)
                ],
                dim=1,
            )
            prompts.append(prompt)
        prompts = torch.cat(prompts, dim=0)
    else:
        raise ValueError(f"Unknown class_token_position: {self.class_token_position}")

    return prompts
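
For context, a minimal sketch of how this forward pass is typically consumed downstream. The names `prompt_learner`, `text_encoder`, and `tokenized_prompts` are assumptions following the usual CoOp-style pipeline, not taken from this file.

# Hypothetical usage sketch (assumed names; not from trainers/catex.py).
prompts = prompt_learner()                                 # in-distribution prompts, no perturbation
text_features = text_encoder(prompts, tokenized_prompts)  # (n_cls, feat_dim)

ood_prompts = prompt_learner(ood_prompt=True)              # single learned OOD prompt
# With several learned OOD contexts, select one explicitly:
# ood_prompts = prompt_learner(ood_prompt=True, ood_prompt_idx=k)
ood_text_features = text_encoder(ood_prompts, tokenized_prompts)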