update cap
app.py CHANGED
@@ -18,7 +18,7 @@ from PIL import Image
 from huggingface_hub import hf_hub_download, login
 
 from open_flamingo.src.factory import create_model_and_transforms
-from open_flamingo.chat.conversation import
+from open_flamingo.chat.conversation import ChatBOT, CONV_VISION
 
 sys.path.append(str(Path(__file__).parent.parent.parent))
 TEMP_FILE_DIR = Path(__file__).parent / 'temp'
@@ -63,7 +63,7 @@ if "vision_encoder.logit_scale" in model_state_dict:
 del model_state_dict["vision_encoder.visual.ln_post.weight"]
 del model_state_dict["vision_encoder.visual.ln_post.bias"]
 flamingo.load_state_dict(model_state_dict, strict=True)
-chat =
+chat = ChatBOT(flamingo, image_processor, tokenizer, vis_embed_size,model_name)
 
 
 def get_outputs(
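The context lines around the new chat assignment show checkpoint keys being deleted before a strict load. A minimal sketch of that pruning pattern, for reference only; the helper name load_pruned_checkpoint is hypothetical and not part of this commit:

import torch

def load_pruned_checkpoint(model: torch.nn.Module, state_dict: dict, drop_keys) -> None:
    # Remove keys the current model no longer defines
    # (e.g. vision_encoder.visual.ln_post.*) so that a
    # strict load does not fail on unexpected keys.
    for key in drop_keys:
        state_dict.pop(key, None)
    model.load_state_dict(state_dict, strict=True)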
multimodal/open_flamingo/chat/__init__.py CHANGED
@@ -0,0 +1,2 @@
+
+
multimodal/open_flamingo/chat/conversation.py CHANGED
@@ -275,7 +275,7 @@ def preprocess_image(sample, image_processor):
     return image
 
 
-class
+class ChatBOT:
    def __init__(self, model, vis_processor, tokenizer, vis_embed_size,model_name):
        self.model = model
        self.vis_processor = vis_processor
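Only the first two constructor assignments are visible in this hunk. A sketch of the ChatBOT skeleton under the assumption that the remaining constructor arguments are stored the same way; everything past vis_processor is an assumption, not part of the visible diff:

class ChatBOT:
    def __init__(self, model, vis_processor, tokenizer, vis_embed_size, model_name):
        self.model = model                  # Flamingo model, as in the diff
        self.vis_processor = vis_processor  # image processor, as in the diff
        # The attributes below are assumed; the hunk cuts off after vis_processor.
        self.tokenizer = tokenizer
        self.vis_embed_size = vis_embed_size
        self.model_name = model_name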