Upload generate_prompt.py with huggingface_hub
Browse files — generate_prompt.py: +3 −3 (three debug print statements commented out)
generate_prompt.py
CHANGED
@@ -87,9 +87,9 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
(old version)
 87
 88           data_list.append(messages)
 89           save_list.append(save_)
 90 -     print(len(data_list))
 91       text = processor.apply_chat_template(data_list, tokenize=False, add_generation_prompt=True)
 92 -     print(len(text))
 93       image_inputs, video_inputs = process_vision_info(data_list)
 94       inputs = processor(
 95           text=[text],

@@ -102,7 +102,7 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
(old version)
102
103       # Inference: Generation of the output
104       generated_ids = model.generate(**inputs, max_new_tokens=128)
105 -     print(generated_ids.shape)
106       generated_ids_trimmed = [
107           out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
108       ]
(new version)
 87
 88           data_list.append(messages)
 89           save_list.append(save_)
 90 +     #print(len(data_list))
 91       text = processor.apply_chat_template(data_list, tokenize=False, add_generation_prompt=True)
 92 +     #print(len(text))
 93       image_inputs, video_inputs = process_vision_info(data_list)
 94       inputs = processor(
 95           text=[text],

(new version)
102
103       # Inference: Generation of the output
104       generated_ids = model.generate(**inputs, max_new_tokens=128)
105 +     #print(generated_ids.shape)
106       generated_ids_trimmed = [
107           out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
108       ]