in src/hyperpod_nemo_adapter/collections/data/vision_dataset.py
import copy

import torch


def tokenize_dialogs(dialogs, images, processor):
    text_prompt = processor.apply_chat_template(dialogs)
    batch = processor(images=images, text=text_prompt, padding=True, return_tensors="pt")
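    # "batch" carries input_ids and attention_mask plus the processor's image
    # tensors (e.g. pixel_values; the exact keys depend on the processor and are
    # an assumption here, not shown in this excerpt)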
    label_list = []
    for i in range(len(batch["input_ids"])):
        dialog_tokens = batch["input_ids"][i].tolist()
        labels = copy.copy(dialog_tokens)
        # 128009 is the <|eot_id|> token that terminates each message turn
        eot_indices = [k for k, t in enumerate(labels) if t == 128009]
        last_idx = 0
        # the system prompt header "<|start_header_id|>system<|end_header_id|>" is tokenized to [128006, 9125, 128007]
        # the user prompt header "<|start_header_id|>user<|end_header_id|>" is tokenized to [128006, 882, 128007]
        prompt_header_seqs = [[128006, 9125, 128007], [128006, 882, 128007]]
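        # Every segment that ends at an <|eot_id|> and starts with a system/user
        # header is masked to -100, the default ignore_index of PyTorch's
        # cross-entropy loss, so only assistant responses contribute to training.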
        for idx in eot_indices:
            current_seq = labels[last_idx : idx + 1]
            if check_header(prompt_header_seqs, current_seq):
                # found a prompt header, so this segment is a system/user turn and should be masked
                labels[last_idx : idx + 1] = [-100] * (idx - last_idx + 1)
                # last_idx is deliberately not advanced here: the header tokens are now
                # -100, so they cannot match again, and the next iteration keeps the
                # assistant segment that follows
            else:
                last_idx = idx + 1
        # Mask every assistant header "<|start_header_id|>assistant<|end_header_id|>",
        # which is tokenized to [128006, 78191, 128007]
        assistant_header_seq = [128006, 78191, 128007]
        labels = replace_target(assistant_header_seq, labels)
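        # The assistant's reply tokens, including its trailing <|eot_id|>, stay
        # unmasked, so the model learns both the response and when to stop.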
        # Mask padding tokens and the image placeholder token (id 128256)
        for j in range(len(labels)):
            if labels[j] == processor.tokenizer.pad_token_id or labels[j] == 128256:
                labels[j] = -100
        label_list.append(labels)
    batch["labels"] = torch.tensor(label_list)
    return batch
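
# --- Hypothetical sketches, not part of this file's excerpt ---
# check_header and replace_target are referenced above but not shown here. The
# minimal sketches below are assumptions reconstructed from the call sites:
# check_header reports whether any 3-token header sequence occurs in a segment,
# and replace_target overwrites each occurrence of a 3-token header with -100s.


def check_header(targets, seq):
    # True if any 3-token target sequence appears contiguously in seq
    for i in range(len(seq) - 2):
        if seq[i : i + 3] in targets:
            return True
    return False


def replace_target(target, seq):
    # overwrite every occurrence of the 3-token target with -100, in place
    for i in range(len(seq) - 2):
        if seq[i : i + 3] == target:
            seq[i : i + 3] = [-100, -100, -100]
    return seq


# Hypothetical usage sketch, assuming the Hugging Face Llama 3.2 vision
# processor; the model id, image path, and dialog contents are illustrative only:
#
# from transformers import AutoProcessor
# from PIL import Image
#
# processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
# dialogs = [[
#     {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Describe this image."}]},
#     {"role": "assistant", "content": [{"type": "text", "text": "A cat sitting on a mat."}]},
# ]]
# images = [[Image.open("cat.png")]]  # one image list per dialog
# batch = tokenize_dialogs(dialogs, images, processor)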