import base64
import io
import os

import gradio as gr
import spaces
from PIL import Image
from llama_cpp import Llama
from llama_cpp.llama_chat_format import MoondreamChatHandler
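# Enable the hf_transfer backend for faster model downloads from the Hub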
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
MODEL_LIST = ["openbmb/MiniCPM-Llama3-V-2_5", "openbmb/MiniCPM-Llama3-V-2_5-int4"]
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Default to the GGUF repo loaded below so a missing MODEL_ID env var
# does not crash the .split() on the next line.
MODEL_ID = os.environ.get("MODEL_ID", "openbmb/MiniCPM-Llama3-V-2_5-gguf")
MODEL_NAME = MODEL_ID.split("/")[-1]
TITLE = "<h1><center>VL-Chatbox</center></h1>"
DESCRIPTION = f'<h3><center>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></center></h3>'
CSS = """
.duplicate-button {
margin: auto !important;
color: white !important;
background: black !important;
border-radius: 100vh !important;
}
"""
chat_handler = MoondreamChatHandler.from_pretrained(
    repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
    filename="*mmproj*",
)
llm = Llama.from_pretrained(
    repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
    filename="ggml-model-Q5_K_M.gguf",
    chat_handler=chat_handler,
    n_ctx=2048,  # n_ctx should be increased to accommodate the image embedding
)
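# Local helper: llama-cpp-python chat handlers accept image_url values as
# http(s) URLs or base64 data URIs, so a PIL image has to be serialized
# before it can go into a chat message.
def image_to_data_uri(image: Image.Image) -> str:
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{encoded}"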
@spaces.GPU(queue=False)
def stream_chat(message, history: list, temperature: float, max_new_tokens: int):
    print(f"message is - {message}")
    print(f"history is - {history}")
    if message["files"]:
        # A new image arrived with this turn.
        image = Image.open(message["files"][-1]).convert("RGB")
    elif history:
        # Reuse the image from the first turn; the multimodal textbox stores
        # an uploaded file as a (filepath,) tuple in the first history entry.
        image = Image.open(history[0][0][0]).convert("RGB")
    else:
        raise gr.Error("Please upload an image first.")
    image_url = image_to_data_uri(image)

    messages = []
    for prompt, answer in history:
        if isinstance(prompt, tuple):
            # Bare file-upload entries carry no text; the image itself is
            # attached to every user turn below.
            continue
        messages.extend([
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
            {"role": "assistant", "content": answer or ""},
        ])
    # Append the current turn once, after the history.
    messages.append({
        "role": "user",
        "content": [
            {"type": "text", "text": message["text"]},
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    })
    print(f"messages -\n{messages}")

    response = llm.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_new_tokens,
        stream=True,
    )
    # stream=True returns a generator of chunks; accumulate the deltas and
    # yield the running text so the chat interface renders it incrementally.
    buffer = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            buffer += delta["content"]
            yield buffer
chatbot = gr.Chatbot(height=450)
chat_input = gr.MultimodalTextbox(
    interactive=True,
    file_types=["image"],
    placeholder="Enter message or upload file...",
    show_label=False,
)
EXAMPLES = [
    [{"text": "What is on the desk?", "files": ["./laptop.jpg"]}],
    [{"text": "Where is it?", "files": ["./hotel.jpg"]}],
    [{"text": "Can you describe this image?", "files": ["./spacecat.png"]}],
]
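# The example images are assumed to ship alongside app.py in the Space repo.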
with gr.Blocks(css=CSS) as demo:
    gr.HTML(TITLE)
    gr.HTML(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
    gr.ChatInterface(
        fn=stream_chat,
        multimodal=True,
        textbox=chat_input,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.8,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=128,
                maximum=4096,
                step=1,
                value=1024,
                label="Max new tokens",
                render=False,
            ),
        ],
    )
    gr.Examples(EXAMPLES, [chat_input])

if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)