Merge branch 'main' of https://huggingface.co/spaces/OpenGVLab/VideoChatGPT
Files changed:
- README.md (+1, -1)
- configs/config.json (+1, -1)
- models/videochat.py (+5, -2)
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title:
+title: 'VideoChat: Chat-Centric Video Understanding'
 emoji: π
 colorFrom: green
 colorTo: blue
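Note that the new title has to be quoted: it contains a colon, which would otherwise make the YAML front matter ambiguous and break parsing of the Space card metadata.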
configs/config.json CHANGED
@@ -3,7 +3,7 @@
     "vit_model": "eva_clip_g",
     "vit_model_path": "model/eva_vit_g.pth",
     "q_former_model_path": "model/blip2_pretrained_flant5xxl.pth",
-    "llama_model_path": "
+    "llama_model_path": "ynhe/stable-vicuna-13b",
     "videochat_model_path": "model/videochat.pth",
     "img_size": 224,
     "num_query_token": 32,
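The key change here is that `llama_model_path` now holds a Hub repo id rather than a local checkpoint path, so the LLaMA weights are resolved over the network at load time. A minimal sketch of what that implies for the loading code (illustrative only, not code from this repo):

```python
import json

# Read the Space's model config (path taken from the diff above).
with open("configs/config.json") as f:
    cfg = json.load(f)

# Local checkpoints such as "model/eva_vit_g.pth" load from disk, but
# "ynhe/stable-vicuna-13b" is a Hub repo id: any from_pretrained() call
# on it downloads from huggingface.co and needs an auth token if the
# repo is gated or private, which is what the videochat.py diff below adds.
print(cfg["llama_model_path"])  # -> ynhe/stable-vicuna-13b
```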
models/videochat.py CHANGED
@@ -1,3 +1,4 @@
+import os
 import random
 import logging
 
@@ -109,7 +110,7 @@ class VideoChat(Blip2Base):
         print('Loading Q-Former Done')
 
         print('Loading LLAMA')
-        self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
+        self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
         self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
 
         if self.low_resource:
@@ -117,12 +118,14 @@ class VideoChat(Blip2Base):
                 llama_model_path,
                 torch_dtype=torch.float16,
                 load_in_8bit=True,
-                device_map="auto"
+                device_map="auto",
+                use_auth_token=os.environ["HF_TOKEN"],
             )
         else:
             self.llama_model = LlamaForCausalLM.from_pretrained(
                 llama_model_path,
                 torch_dtype=torch.float16,
+                use_auth_token=os.environ["HF_TOKEN"],
             )
 
         print("freeze LLAMA")
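Taken together, these changes let the Space load a gated or private LLaMA checkpoint by passing the `HF_TOKEN` Space secret to every `from_pretrained` call. One caveat: `os.environ["HF_TOKEN"]` raises `KeyError` when the secret is missing, which would crash the Space before any model loads. A more defensive variant (a sketch, not part of this commit) could read the token optionally; note also that newer transformers releases deprecate `use_auth_token` in favor of `token`:

```python
import os

from transformers import LlamaTokenizer

# Read the token optionally: None means anonymous Hub access, so a public
# repo still loads, and a gated one fails with a clear Hub error message
# instead of a KeyError at startup.
hf_token = os.environ.get("HF_TOKEN") or None

tokenizer = LlamaTokenizer.from_pretrained(
    "ynhe/stable-vicuna-13b",  # repo id taken from configs/config.json above
    use_fast=False,
    use_auth_token=hf_token,   # newer transformers versions: token=hf_token
)
```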