# vlm-demo/serve/__init__.py
# Commit 6ba6dce ("support fused backbones and update MODEL_ID_TO_NAME", abalakrishnaTRI)
from collections import OrderedDict
# Arrange keys in display priority order (high --> low)
# Map of served model IDs -> human-readable display names.
# Insertion order encodes display priority (high --> low); a dict literal
# preserves that order, and wrapping in OrderedDict keeps the public type
# unchanged for any caller relying on it.
MODEL_ID_TO_NAME = OrderedDict(
    {
        "llava-lvis4v-lrv+redux-lvis4v-lrv-resize-naive-dinosiglip-vit-so-14-384px-no-align+13b+stage-finetune+x7": (
            "PrismaticVLM 13B - Chat"
        ),
        "llava-lvis4v-lrv+redux-lvis4v-lrv-resize-naive-dinosiglip-vit-so-14-384px-no-align+7b+stage-finetune+x7": (
            "PrismaticVLM 7B - Chat"
        ),
        "llava-lvis4v-lrv+redux-lvis4v-lrv-resize-naive-dinosiglip-vit-so-14-384px-no-align-llama2pure+13b+stage-finetune+x7": (
            "PrismaticVLM 13B"
        ),
        "llava-lvis4v-lrv+redux-lvis4v-lrv-resize-naive-dinosiglip-vit-so-14-384px-no-align-llama2pure+7b+stage-finetune+x7": (
            "PrismaticVLM 7B"
        ),
        "redux-resize-naive-dinosiglip-vit-so-14-384px-no-align-llama2pure+13b+stage-finetune+x7": (
            "PrismaticVLM 13B (Controlled)"
        ),
        "redux-resize-naive-dinosiglip-vit-so-14-384px-no-align-llama2pure+7b+stage-finetune+x7": (
            "PrismaticVLM 7B (Controlled)"
        ),
        "llava-v1.5-13b": "LLaVA 1.5: 13B",
        "llava-v1.5-7b": "LLaVA 1.5: 7B",
    }
)
# Map of UI interaction-mode labels -> internal mode keys.
# Built from parallel tuples via zip; tuple order fixes the display order.
INTERACTION_MODES_MAP = OrderedDict(
    zip(
        (
            "Chat",
            "Captioning",
            "Bounding Box Prediction",
            "Visual Question Answering",
            "True/False Visual Question Answering",
        ),
        ("chat", "captioning", "bbox_pred", "vqa", "true_false"),
    )
)