Update chatbot.py
chatbot.py  +3 -3  CHANGED
@@ -27,14 +27,14 @@ import io # Add this import for working with image bytes
 
 # You can also use models that are commented below
 # model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
-model_id = "llava-hf/llava-interleave-qwen-7b-hf"
-
+# model_id = "llava-hf/llava-interleave-qwen-7b-hf"
+model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
 processor = LlavaProcessor.from_pretrained(model_id)
 model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True)
 model.to("cuda")
 # Credit to merve for code of llava interleave qwen
 
-def sample_frames(video_file
+def sample_frames(video_file):
     try:
         video = cv2.VideoCapture(video_file)
         total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
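The hunk cuts off after the first lines of sample_frames. For context, here is a minimal sketch of how a cv2-based frame sampler along these lines is commonly written; the num_frames parameter, the even-interval sampling, and the BGR-to-RGB conversion are illustrative assumptions, not part of this commit.

import cv2
from PIL import Image

def sample_frames(video_file, num_frames=8):  # num_frames is an assumed parameter
    try:
        video = cv2.VideoCapture(video_file)
        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        # Pick roughly evenly spaced frames across the clip
        interval = max(total_frames // num_frames, 1)
        frames = []
        for i in range(total_frames):
            ret, frame = video.read()
            if not ret:
                break
            if i % interval == 0:
                # cv2 decodes to BGR; convert to RGB before wrapping in a PIL image
                frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
        video.release()
        return frames[:num_frames]
    except Exception:
        # On any decoding failure, fall back to an empty frame list
        return []

The list of PIL frames returned this way would then typically be passed to the processor together with the text prompt before calling model.generate.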