feat: llava-original
- gradio_web_server.log +0 -0
- gradio_web_server_multi.log +0 -0
- serve_images/2a9266044ab15516e8c0ba1a75f2b1e5.jpg +0 -0
- src/__pycache__/__init__.cpython-310.pyc +0 -0
- src/__pycache__/constants.cpython-310.pyc +0 -0
- src/__pycache__/conversation.cpython-310.pyc +0 -0
- src/__pycache__/utils.cpython-310.pyc +0 -0
- src/conversation.py +37 -1
- src/model/__pycache__/__init__.cpython-310.pyc +0 -0
- src/model/__pycache__/compression.cpython-310.pyc +0 -0
- src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc +0 -0
- src/model/__pycache__/model_adapter.cpython-310.pyc +0 -0
- src/model/__pycache__/model_chatglm.cpython-310.pyc +0 -0
- src/model/__pycache__/model_cllm.cpython-310.pyc +0 -0
- src/model/__pycache__/model_codet5p.cpython-310.pyc +0 -0
- src/model/__pycache__/model_exllama.cpython-310.pyc +0 -0
- src/model/__pycache__/model_falcon.cpython-310.pyc +0 -0
- src/model/__pycache__/model_registry.cpython-310.pyc +0 -0
- src/model/__pycache__/model_xfastertransformer.cpython-310.pyc +0 -0
- src/model/__pycache__/model_yuan2.cpython-310.pyc +0 -0
- src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc +0 -0
- src/model/model_adapter.py +7 -3
- src/model/model_llava.py +8 -1
- src/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- src/modules/__pycache__/awq.cpython-310.pyc +0 -0
- src/modules/__pycache__/exllama.cpython-310.pyc +0 -0
- src/modules/__pycache__/gptq.cpython-310.pyc +0 -0
- src/modules/__pycache__/xfastertransformer.cpython-310.pyc +0 -0
- src/serve/__pycache__/__init__.cpython-310.pyc +0 -0
- src/serve/__pycache__/api_provider.cpython-310.pyc +0 -0
- src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc +0 -0
- src/serve/__pycache__/gradio_block_arena_vision.cpython-310.pyc +0 -0
- src/serve/__pycache__/gradio_block_arena_vision_named.cpython-310.pyc +0 -0
- src/serve/__pycache__/gradio_web_server.cpython-310.pyc +0 -0
- src/serve/__pycache__/remote_logger.cpython-310.pyc +0 -0
- src/serve/gradio_web_server.py +6 -25
- vision-tmp-2024-07-10-conv.json +2 -0
gradio_web_server.log
CHANGED
The diff for this file is too large to render. See raw diff.

gradio_web_server_multi.log
CHANGED
The diff for this file is too large to render. See raw diff.
serve_images/2a9266044ab15516e8c0ba1a75f2b1e5.jpg
ADDED
src/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/__pycache__/__init__.cpython-310.pyc and b/src/__pycache__/__init__.cpython-310.pyc differ

src/__pycache__/constants.cpython-310.pyc
CHANGED
Binary files a/src/__pycache__/constants.cpython-310.pyc and b/src/__pycache__/constants.cpython-310.pyc differ

src/__pycache__/conversation.cpython-310.pyc
CHANGED
Binary files a/src/__pycache__/conversation.cpython-310.pyc and b/src/__pycache__/conversation.cpython-310.pyc differ

src/__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/src/__pycache__/utils.cpython-310.pyc and b/src/__pycache__/utils.cpython-310.pyc differ
src/conversation.py
CHANGED
@@ -11,7 +11,7 @@ from enum import auto, IntEnum
 from io import BytesIO
 import os
 from typing import List, Any, Dict, Union, Tuple
-
+from loguru import logger
 
 class SeparatorStyle(IntEnum):
     """Separator styles."""
@@ -165,6 +165,10 @@ class Conversation:
                 ret += ""
             for i, (role, message) in enumerate(self.messages):
                 if message:
+                    logger.info("msg={}", message)
+                    if type(message) is tuple:
+                        message, images = message
+                        message = "<image>" * len(images) + message
                     ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
                     ret += f"{message.strip()}<|eot_id|>"
                 else:
@@ -2083,6 +2087,38 @@ register_conv_template(
     )
 )
 
+# register for fire
+conv_llava_llama_3 = Conversation(
+    name="llava-original",
+    system_message="You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
+    roles=("user\n\n",
+           "assistant\n\n"),
+    # version="llama3",
+    messages=[],
+    offset=0,
+    sep_style=SeparatorStyle.LLAMA3,
+    system_template="<|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>",
+    sep="",
+    stop_str="<|eot_id|>",
+    stop_token_ids=[128001, 128009],
+)
+register_conv_template(conv_llava_llama_3)
+
+conv_llava_llama_3_student = Conversation(
+    name="llava-fire",
+    system_message="You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
+    roles=("user",
+           "assistant"),
+    # version="llama_v3_student",
+    messages=[],
+    system_template="<|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>",
+    offset=0,
+    sep_style=SeparatorStyle.LLAMA3,
+    sep="",
+    stop_str="<|eot_id|>",
+    stop_token_ids=[128001, 128009],
+)
+register_conv_template(conv_llava_llama_3_student)
 
 if __name__ == "__main__":
     from src.conversation import get_conv_template
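
For reference, a minimal sketch of how the template registered above combines with the new tuple-message handling in get_prompt(); it assumes the existing get_conv_template/append_message API in src/conversation.py, and img_b64 is a hypothetical stand-in for a base64-encoded image string:

# Sketch only (illustrative, not part of the diff): exercise "llava-original".
from src.conversation import get_conv_template

img_b64 = "<base64-encoded image>"  # hypothetical placeholder value

conv = get_conv_template("llava-original")
# A tuple message carries (text, [images]); the patched get_prompt() logs the
# message, then prepends one "<image>" token per image before wrapping the
# turn in <|start_header_id|>...<|eot_id|> markers.
conv.append_message(conv.roles[0], ("Describe the image", [img_b64]))
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
print(prompt)
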
src/model/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/__init__.cpython-310.pyc and b/src/model/__pycache__/__init__.cpython-310.pyc differ

src/model/__pycache__/compression.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/compression.cpython-310.pyc and b/src/model/__pycache__/compression.cpython-310.pyc differ

src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc and b/src/model/__pycache__/llama_condense_monkey_patch.cpython-310.pyc differ

src/model/__pycache__/model_adapter.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_adapter.cpython-310.pyc and b/src/model/__pycache__/model_adapter.cpython-310.pyc differ

src/model/__pycache__/model_chatglm.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_chatglm.cpython-310.pyc and b/src/model/__pycache__/model_chatglm.cpython-310.pyc differ

src/model/__pycache__/model_cllm.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_cllm.cpython-310.pyc and b/src/model/__pycache__/model_cllm.cpython-310.pyc differ

src/model/__pycache__/model_codet5p.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_codet5p.cpython-310.pyc and b/src/model/__pycache__/model_codet5p.cpython-310.pyc differ

src/model/__pycache__/model_exllama.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_exllama.cpython-310.pyc and b/src/model/__pycache__/model_exllama.cpython-310.pyc differ

src/model/__pycache__/model_falcon.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_falcon.cpython-310.pyc and b/src/model/__pycache__/model_falcon.cpython-310.pyc differ

src/model/__pycache__/model_registry.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_registry.cpython-310.pyc and b/src/model/__pycache__/model_registry.cpython-310.pyc differ

src/model/__pycache__/model_xfastertransformer.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_xfastertransformer.cpython-310.pyc and b/src/model/__pycache__/model_xfastertransformer.cpython-310.pyc differ

src/model/__pycache__/model_yuan2.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/model_yuan2.cpython-310.pyc and b/src/model/__pycache__/model_yuan2.cpython-310.pyc differ

src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc
CHANGED
Binary files a/src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc and b/src/model/__pycache__/monkey_patch_non_inplace.cpython-310.pyc differ
src/model/model_adapter.py
CHANGED
@@ -45,7 +45,7 @@ from src.modules.exllama import ExllamaConfig, load_exllama_model
 from src.modules.xfastertransformer import load_xft_model, XftConfig
 from src.modules.gptq import GptqConfig, load_gptq_quantized
 from src.utils import get_gpu_memory
-
+from loguru import logger
 # Check an environment variable to check if we should be sharing Peft model
 # weights. When false we treat all Peft models as separate.
 peft_share_base_weights = (
@@ -359,6 +359,7 @@ def load_model(
         raise e
 
     # Load model
+    logger.info("adapter {}", adapter)
     model, tokenizer = adapter.load_model(model_path, kwargs)
 
     if (
@@ -387,6 +388,7 @@ def get_conversation_template(model_path: str) -> Conversation:
 def get_conversation_template(model_path: str) -> Conversation:
     """Get the default conversation template."""
     adapter = get_model_adapter(model_path)
+    logger.info("adapter {}", adapter)
     return adapter.get_default_conv_template(model_path)
 
 
@@ -2294,12 +2296,14 @@ class LlavaAdapter(BaseModelAdapter):
         from loguru import logger
         logger.info("model_path {}", model_path)
         if model_path in ["llava-fire", "llava-original"]:
+            return get_conv_template(model_path)
+        '''if model_path in ["llava-fire", "llava-original"]:
             from llava.conversation import conv_templates
             if model_path == "llava-fire":
                 return conv_templates["llama_v3_student"].copy()
             else:
-                return conv_templates["llama_v3"].copy()
-
+                return conv_templates["llama_v3"].copy()'''
+
         model_path = model_path.lower()
         if "34b" in model_path:
             return get_conv_template("llava-chatml")
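
With this change, both custom serving names resolve through the local registry in src/conversation.py instead of llava.conversation. A minimal sketch, assuming the import paths used elsewhere in this repository:

# Sketch only (illustrative, not part of the diff): check template resolution.
from src.model.model_adapter import get_conversation_template

for name in ["llava-fire", "llava-original"]:
    conv = get_conversation_template(name)
    # Both should carry the stop settings registered in src/conversation.py:
    # stop_str "<|eot_id|>" and stop_token_ids [128001, 128009].
    print(name, "->", conv.name, conv.stop_str)
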
src/model/model_llava.py
CHANGED
@@ -10,6 +10,8 @@ import copy
 import torch
 from llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
 import spaces
+from io import BytesIO
+import base64
 #model_path = "/scratch/TecManDep/A_Models/llava-v1.6-vicuna-7b"
 #conv_template = "vicuna_v1" # Make sure you use correct chat template for different models
 
@@ -60,8 +62,13 @@ def inference():
 
 
 @spaces.GPU
-def
+def inference_by_prompt_and_images(prompt, images):
     device = "cuda"
+    if len(images) > 0 and type(images[0]) is str:
+        image_data = []
+        for image in images:
+            image_data.append(Image.open(BytesIO(base64.b64decode(image))))
+        images = image_data
     image_tensor = process_images(images, image_processor_llava, model_llava.config)
     image_tensor = image_tensor.to(dtype=torch.float16, device=device)
     input_ids = tokenizer_image_token(prompt, tokenizer_llava, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
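
The patched entry point now accepts base64-encoded strings as well as already-decoded images. A sketch of producing the string form on the caller's side; the PIL usage and the file name example.jpg are illustrative assumptions (the decode path also relies on PIL's Image already being imported in model_llava.py):

# Sketch only (illustrative, not part of the diff): build the base64 input
# that inference_by_prompt_and_images() decodes back into a PIL image.
import base64
from io import BytesIO

from PIL import Image

img = Image.open("example.jpg")  # hypothetical input file
buf = BytesIO()
img.save(buf, format="JPEG")
img_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")

# Server side, the patch reverses this with:
#   Image.open(BytesIO(base64.b64decode(img_b64)))
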
src/modules/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/__init__.cpython-310.pyc and b/src/modules/__pycache__/__init__.cpython-310.pyc differ

src/modules/__pycache__/awq.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/awq.cpython-310.pyc and b/src/modules/__pycache__/awq.cpython-310.pyc differ

src/modules/__pycache__/exllama.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/exllama.cpython-310.pyc and b/src/modules/__pycache__/exllama.cpython-310.pyc differ

src/modules/__pycache__/gptq.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/gptq.cpython-310.pyc and b/src/modules/__pycache__/gptq.cpython-310.pyc differ

src/modules/__pycache__/xfastertransformer.cpython-310.pyc
CHANGED
Binary files a/src/modules/__pycache__/xfastertransformer.cpython-310.pyc and b/src/modules/__pycache__/xfastertransformer.cpython-310.pyc differ
src/serve/__pycache__/__init__.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/__init__.cpython-310.pyc and b/src/serve/__pycache__/__init__.cpython-310.pyc differ

src/serve/__pycache__/api_provider.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/api_provider.cpython-310.pyc and b/src/serve/__pycache__/api_provider.cpython-310.pyc differ

src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc and b/src/serve/__pycache__/gradio_block_arena_named.cpython-310.pyc differ

src/serve/__pycache__/gradio_block_arena_vision.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/gradio_block_arena_vision.cpython-310.pyc and b/src/serve/__pycache__/gradio_block_arena_vision.cpython-310.pyc differ

src/serve/__pycache__/gradio_block_arena_vision_named.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/gradio_block_arena_vision_named.cpython-310.pyc and b/src/serve/__pycache__/gradio_block_arena_vision_named.cpython-310.pyc differ

src/serve/__pycache__/gradio_web_server.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/gradio_web_server.cpython-310.pyc and b/src/serve/__pycache__/gradio_web_server.cpython-310.pyc differ

src/serve/__pycache__/remote_logger.cpython-310.pyc
CHANGED
Binary files a/src/serve/__pycache__/remote_logger.cpython-310.pyc and b/src/serve/__pycache__/remote_logger.cpython-310.pyc differ
src/serve/gradio_web_server.py
CHANGED
@@ -755,38 +755,19 @@ def get_model_description_md(models):
 def build_about():
     about_markdown = """
 # About Us
-
-We open-source our [FastChat](https://github.com/lm-sys/FastChat) project at GitHub and release chat and human feedback dataset. We invite everyone to join us!
-
+Placeholder
 ## Arena Core Team
-
-
+Placeholder
 ## Past Members
-
-
+Placeholder
 ## Learn more
-
-- LMSYS-Chat-1M dataset [paper](https://arxiv.org/abs/2309.11998), LLM Judge [paper](https://arxiv.org/abs/2306.05685)
+Placeholder
 
 ## Contact Us
-
-- File issues on [GitHub](https://github.com/lm-sys/FastChat)
-- Download our datasets and models on [HuggingFace](https://huggingface.co/lmsys)
+Placeholder
 
 ## Acknowledgment
-
-We also thank [UC Berkeley SkyLab](https://sky.cs.berkeley.edu/), [Kaggle](https://www.kaggle.com/), [MBZUAI](https://mbzuai.ac.ae/), [a16z](https://www.a16z.com/), [Together AI](https://www.together.ai/), [Hyperbolic](https://hyperbolic.xyz/), [Anyscale](https://www.anyscale.com/), [HuggingFace](https://huggingface.co/) for their generous sponsorship. Learn more about partnership [here](https://lmsys.org/donations/).
-
-<div class="sponsor-image-about">
-<img src="https://storage.googleapis.com/public-arena-asset/skylab.png" alt="SkyLab">
-<img src="https://storage.googleapis.com/public-arena-asset/kaggle.png" alt="Kaggle">
-<img src="https://storage.googleapis.com/public-arena-asset/mbzuai.jpeg" alt="MBZUAI">
-<img src="https://storage.googleapis.com/public-arena-asset/a16z.jpeg" alt="a16z">
-<img src="https://storage.googleapis.com/public-arena-asset/together.png" alt="Together AI">
-<img src="https://storage.googleapis.com/public-arena-asset/hyperbolic_logo.png" alt="Hyperbolic">
-<img src="https://storage.googleapis.com/public-arena-asset/anyscale.png" alt="AnyScale">
-<img src="https://storage.googleapis.com/public-arena-asset/huggingface.png" alt="HuggingFace">
-</div>
+Placeholder
 """
     gr.Markdown(about_markdown, elem_id="about_markdown")
 
vision-tmp-2024-07-10-conv.json
CHANGED
@@ -5,3 +5,5 @@
 {"tstamp": 1720589062.1758, "type": "chat", "model": "llava-fire", "gen_params": {"temperature": 0.7, "top_p": 1.0, "max_new_tokens": 1024}, "start": 1720589047.9171, "finish": 1720589062.1758, "state": {"template_name": "vicuna_v1.1", "system_message": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", "roles": ["USER", "ASSISTANT"], "messages": [["USER", "Hello"], ["ASSISTANT", "hello"]], "offset": 0, "conv_id": "963f15cd5e224eb8ae02c67ed37b93c4", "model_name": "llava-fire", "has_csam_image": false}, "ip": "46.3.240.105"}
 {"tstamp": 1720589240.391, "type": "chat", "model": "llava-fire", "gen_params": {"temperature": 0.7, "top_p": 1.0, "max_new_tokens": 1024}, "start": 1720589221.915, "finish": 1720589240.391, "state": {"template_name": "vicuna_v1.1", "system_message": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", "roles": ["USER", "ASSISTANT"], "messages": [["USER", "Hello"], ["ASSISTANT", "hello"]], "offset": 0, "conv_id": "062c8dbcf02d4779ad5cb89f836a7480", "model_name": "llava-fire", "has_csam_image": false}, "ip": "46.3.240.106"}
 {"tstamp": 1720589240.3917, "type": "chat", "model": "llava-original", "gen_params": {"temperature": 0.7, "top_p": 1.0, "max_new_tokens": 1024}, "start": 1720589221.917, "finish": 1720589240.3917, "state": {"template_name": "vicuna_v1.1", "system_message": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.", "roles": ["USER", "ASSISTANT"], "messages": [["USER", "Hello"], ["ASSISTANT", "The figure in the image is a can of S\u00f6dra Almighy, which is a beer. S\u00f6dra is a Swedish brewery known for its lager beers. The can's design is modern and minimalist, with a color scheme that includes black and white, and the text is in English. The label indicates that it is a dry-hopped beer, which means it has been flavored with hops that have not been steeped in the brewing process, giving it a unique taste profile. The can's design suggests a contemporary and possibly craft beer, which is often associated with a more complex flavor profile than traditional lagers."]], "offset": 0, "conv_id": "3255dc27032345ba845813c17981f372", "model_name": "llava-original", "has_csam_image": false}, "ip": "46.3.240.106"}
+{"tstamp": 1720593211.0342, "type": "chat", "model": "llava-fire", "gen_params": {"temperature": 0.7, "top_p": 1.0, "max_new_tokens": 1024}, "start": 1720593197.2799, "finish": 1720593211.0342, "state": {"template_name": "llava-fire", "system_message": "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.", "roles": ["user", "assistant"], "messages": [["user", ["Describe the image", ["2a9266044ab15516e8c0ba1a75f2b1e5"]]], ["assistant", "hello"]], "offset": 0, "conv_id": "f97c281c9d114ae7800b56eb204726fc", "model_name": "llava-fire", "has_csam_image": false}, "ip": "46.3.240.104"}
+{"tstamp": 1720593211.0433, "type": "chat", "model": "llava-original", "gen_params": {"temperature": 0.7, "top_p": 1.0, "max_new_tokens": 1024}, "start": 1720593197.2821, "finish": 1720593211.0433, "state": {"template_name": "llava-original", "system_message": "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.", "roles": ["user\n\n", "assistant\n\n"], "messages": [["user\n\n", ["Describe the image", ["2a9266044ab15516e8c0ba1a75f2b1e5"]]], ["assistant\n\n", "The image captures a moment of proposal between two individuals. The man, dressed in a classic gray suit, is standing upright with his arms outstretched, holding a ring in his left hand. His posture and facial expression suggest he is in the act of proposing. The woman, wearing a vibrant red dress, is kneeling on the ground with her hands clasped together, indicating her readiness to accept the proposal. The simplicity of the background focuses the viewer's attention on the interaction between the two characters. The image is devoid of any text or additional context, allowing the viewer to focus solely on the emotional exchange between the man and the woman."]], "offset": 0, "conv_id": "4731a06b55954bd8bfa659f1c3f9cc91", "model_name": "llava-original", "has_csam_image": false}, "ip": "46.3.240.104"}
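
The log is JSON Lines: one chat record per line with the conversation state inlined, so the new template names can be verified directly. A short sketch, assuming the file path shown above:

# Sketch only (illustrative, not part of the diff): report model and template
# name for each logged record.
import json

with open("vision-tmp-2024-07-10-conv.json") as f:
    for line in f:
        record = json.loads(line)
        state = record["state"]
        print(record["model"], state["template_name"], len(state["messages"]))
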