|
import gradio as gr |
|
from gradio_client import Client, handle_file |
|
from huggingface_hub import InferenceClient |
|
|
|
# Client for the moondream2 vision-language Space; used to caption uploaded
# images via its "/answer_question" endpoint. NOTE(review): constructing the
# Client connects to the Space at import time — confirm that is acceptable.
moondream_client = Client("vikhyatk/moondream2")



# Chat-completion client. NOTE(review): the variable is named "llama" but the
# model is Qwen/QwQ-32B-Preview — consider renaming for clarity.
llama_client = InferenceClient("Qwen/QwQ-32B-Preview")



# Running conversation transcript, shared across requests as plain
# "User: ..." / "Assistant: ..." lines joined before each model call.
history = []
|
|
|
def describe_image(image, user_message):
    """Caption an uploaded image, then continue the chat about it.

    Asks the moondream2 Space to describe *image*, records the user's
    message and the caption in the shared ``history`` transcript, and
    then sends the whole transcript to the chat model for a reply.

    Args:
        image: Filepath of the uploaded image (``gr.Image(type="filepath")``).
        user_message: The user's accompanying text prompt.

    Returns:
        str: The image description followed by the chat model's reply.
    """
    global history

    # Vision model: get a plain-text caption for the image.
    result = moondream_client.predict(
        img=handle_file(image),
        prompt="Describe this image.",
        api_name="/answer_question"
    )
    description = result

    history.append(f"User: {user_message}")
    history.append(f"Assistant: {description}")

    # Chat model sees the entire running transcript as a single user turn.
    full_conversation = "\n".join(history)
    llama_result = llama_client.chat_completion(
        messages=[{"role": "user", "content": full_conversation}],
        max_tokens=512,
        temperature=0.7,
        top_p=0.95
    )

    reply = llama_result['choices'][0]['message']['content']
    # Fix: record the chat model's reply so later turns can see it;
    # previously only the caption was stored and the reply was dropped
    # from the transcript.
    history.append(f"Assistant: {reply}")

    return description + "\n\nAssistant: " + reply
|
|
|
def chat_or_image(image, user_message):
    """Route a request to image captioning or plain chat.

    If *image* is provided, delegates to ``describe_image``; otherwise
    appends the user's message to the shared ``history`` transcript and
    asks the chat model for a reply.

    Args:
        image: Optional filepath of an uploaded image, or None/"" for
            text-only chat.
        user_message: The user's text prompt.

    Returns:
        str: The assistant's response text.
    """
    global history

    if image:
        return describe_image(image, user_message)
    else:
        history.append(f"User: {user_message}")
        # Send the whole running transcript as one user turn.
        full_conversation = "\n".join(history)
        llama_result = llama_client.chat_completion(
            messages=[{"role": "user", "content": full_conversation}],
            max_tokens=512,
            temperature=0.7,
            top_p=0.95
        )
        reply = llama_result['choices'][0]['message']['content']
        # Fix: store the assistant's reply in the transcript; previously
        # only user messages were recorded, so the model never saw its
        # own earlier answers in multi-turn conversations.
        history.append(f"Assistant: {reply}")
        return reply
|
|
|
# Gradio UI: optional image upload plus a text box, routed through
# chat_or_image; output is the assistant's plain-text reply.
# (Labels/placeholders are Turkish: "Upload image (optional)" /
# "Ask a question or chat".)
demo = gr.Interface(

    fn=chat_or_image,

    inputs=[

        gr.Image(type="filepath", label="Resim Yükle (isteğe bağlı)"),

        gr.Textbox(label="Soru Sor ya da Konuş", placeholder="Soru sor...", lines=2)

    ],

    outputs="text",

)
|
|
|
if __name__ == "__main__":

    # show_error=True surfaces server-side exceptions in the browser UI.
    demo.launch(show_error=True)