# import gradio as gr
#
# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"
#
# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(height=300),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=7),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
#     multimodal=True,
# ).launch()

# import gradio as gr
# import time
#
# def count_files(message, history):
#     num_files = len(message["files"])
#     return f"You uploaded {num_files} files"
#
# demo = gr.ChatInterface(fn=count_files, examples=[{"text": "Hello", "files": []}], title="Echo Bot", multimodal=True)
#
# demo.launch()

# import gradio as gr
# import time
#
# def echo(message, history, system_prompt, tokens):
#     response = f"System prompt: {system_prompt}\n Message: {message}."
#     for i in range(min(len(response), int(tokens))):
#         time.sleep(0.05)
#         yield response[: i + 1]
#
# demo = gr.ChatInterface(
#     echo,
#     additional_inputs=[
#         gr.Textbox("You are helpful AI.", label="System Prompt"),
#         gr.Slider(10, 100),
#     ],
# )
#
# demo.launch()

# #from langchain.chat_models import ChatOpenAI
# from langchain.schema import AIMessage, HumanMessage
# import gradio as gr
#
# from get_ollama import GetOllama
#
# llm = GetOllama(model_type=1)()
#
#
# def predict(message, history):
#     history_langchain_format = []
#     for human, ai in history:
#         history_langchain_format.append(HumanMessage(content=human))
#         history_langchain_format.append(AIMessage(content=ai))
#     history_langchain_format.append(HumanMessage(content=message))
#     gpt_response = llm.invoke(history_langchain_format)
#     return gpt_response.content
#
# gr.ChatInterface(predict).launch()

from customize.get_ollama import GetOllama
import gradio as gr

# api_key = "sk-..."  # Replace with your key
# Build the chat client: GetOllama(...) returns a callable that is immediately
# invoked to produce the client object used by predict() below.
# NOTE(review): the Ollama server address, model name, and model_type are
# hard-coded here — the meaning of model_type=1 is not visible in this file;
# confirm against customize/get_ollama.py. Consider moving these to config.
client = GetOllama(ip="10.12.8.21:11434", model_name="qwen2.5:14b", model_type=1)()


def predict(message, history):
    """Stream a chat reply for the Gradio ChatInterface.

    Flattens the (user, assistant) turn pairs in *history* into
    role/content message dicts, appends the new user *message*, then
    streams the model's answer, yielding the accumulated text after
    each chunk so the UI updates progressively.
    """
    conversation = [
        {"role": role, "content": text}
        for user_turn, assistant_turn in history
        for role, text in (("user", user_turn), ("assistant", assistant_turn))
    ]
    conversation.append({"role": "user", "content": message})

    accumulated = ""
    for chunk in client.stream(conversation):
        piece = chunk.content
        # Skip empty/terminal chunks that carry no content.
        if piece is not None:
            accumulated += piece
            yield accumulated


gr.ChatInterface(predict).launch()