# vlm-demo/serve/__init__.py
# (Originally committed by abalakrishnaTRI, commit 83cb829.)
from collections import OrderedDict
# Maps model checkpoint identifiers to human-readable display names.
# Insertion order encodes display priority (highest priority first); a dict
# literal preserves that order on Python 3.7+, so OrderedDict sees it intact.
MODEL_ID_TO_NAME = OrderedDict(
    {
        "llava-lvis4v-lrv+lvis4v-lrv-resize-naive-clip-vit-l-14-336px-no-align-2-epochs-llama2pure+13b+stage-finetune+x7": (
            "Prism-CLIP 13B"
        ),
        "llava-lvis4v-lrv+lvis4v-lrv-resize-naive-clip-vit-l-14-336px-no-align-2-epochs-llama2pure+7b+stage-finetune+x7": (
            "Prism-CLIP 7B"
        ),
        "resize-naive-clip-vit-l-14-336px-no-align-llama2pure+13b+stage-finetune+x7": "Prism-CLIP 13B (Controlled)",
        "resize-naive-clip-vit-l-14-336px-no-align-llama2pure+7b+stage-finetune+x7": "Prism-CLIP 7B (Controlled)",
        "resize-naive-clip-vit-l-14-336px-no-align+13b+stage-finetune+x7": "Prism-CLIP 13B (Controlled) - Chat",
        "resize-naive-clip-vit-l-14-336px-no-align+7b+stage-finetune+x7": "Prism-CLIP 7B (Controlled) - Chat",
        "llava-v1.5-7b": "LLaVA 1.5: 7B",
        "llava-v1.5-13b": "LLaVA 1.5: 13B",
    }
)
# Maps user-facing interaction-mode labels to their internal mode identifiers.
# Insertion order is the order modes are presented; the dict literal keeps it
# (Python 3.7+ guarantees insertion order) before OrderedDict wraps it.
INTERACTION_MODES_MAP = OrderedDict(
    {
        "Chat": "chat",
        "Captioning": "captioning",
        "Bounding Box Prediction": "bbox_pred",
        "Visual Question Answering": "vqa",
        "True/False Visual Question Answering": "true_false",
    }
)