import base64
import io
import os
import uuid

from PIL import Image
from langchain_community.chat_message_histories import SQLChatMessageHistory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
from openai import OpenAI
import gradio as gr

# NOTE(review): hard-coded placeholder key — keep real credentials out of
# source control. FIX: use setdefault so a key already exported in the
# environment is respected instead of being clobbered by the placeholder.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-******")
api_key = os.getenv("DASHSCOPE_API_KEY")
if not api_key:
    raise ValueError("DASHSCOPE_API_KEY environment variable not set!")

# Text-only chat model on the DashScope OpenAI-compatible endpoint.
# Uses the already-validated `api_key` instead of re-reading the environment.
llm = ChatOpenAI(model='qwen-plus',
                 api_key=api_key,
                 base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
                 temperature=0.1)

# Multimodal (text/audio/image) chat model; streaming enabled.
multiModal_llm = ChatOpenAI(model='qwen3-omni-flash',
                            api_key=api_key,
                            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
                            streaming=True,
                            )

# 1. Raw OpenAI client, used for the audio-output streaming call in callTongYillm.
client = OpenAI(
    api_key=api_key,  # requires DASHSCOPE_API_KEY to be configured
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Prompt: system instruction plus a placeholder for the running message history.
# BUG FIX: MessagesPlaceholder's keyword is lowercase `optional`; the original
# `Optional=True` is not a recognized field, so the placeholder was never
# actually optional.
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个多模态AI助手，可以处理文本、音频和图像输入"),
    MessagesPlaceholder(variable_name="messages", optional=True),  # history messages
])

chain = prompt | multiModal_llm  # base chain: prompt -> multimodal LLM


# 聊天记录存在关系型数据库，或者redis
def get_session_history(session_id: str):
    """Return the persisted chat history for *session_id*.

    History is stored in MySQL (not in memory), one row per message in the
    `chat_history_message` table; Redis or another RDBMS could be swapped in.
    """
    history_store = SQLChatMessageHistory(
        session_id=session_id,
        connection_string="mysql+pymysql://root:******@localhost:13306/chat_history?charset=utf8mb4",
        table_name="chat_history_message",
    )
    return history_store


# Wrap the base chain so every invoke reads and appends messages for the
# session id supplied via config["configurable"]["session_id"].
chat_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
)

# Sample text-only user message (content is a list of typed parts).
user_msg = HumanMessage(content=[{
    "type": "text",
    "text": "你是谁?"
}])

# config = {"configurable": {"session_id": str(uuid.uuid4())}}
# Fixed session id: every run shares (and extends) the same stored history.
config = {"configurable": {"session_id": "user123"}}


# resp1 = chat_history.invoke({'messages': [user_msg]}, config=config)
# print(resp1.content)


def add_message(history, messages):
    """Append the user's submitted text and files to the chat history.

    *messages* is the MultimodalTextbox value: a dict with 'files' (list of
    local paths) and 'text'. Returns the updated history plus '' so the
    input box is cleared.
    """
    for file_path in messages['files']:
        print(file_path)
        history.append({"role": "user", "content": {'path': file_path}})
    text = messages['text']
    if text:
        history.append({"role": "user", "content": text})
    return history, ''


def get_last_user_after_assistant(history):
    """Return the user messages submitted since the last assistant reply.

    Returns None for an empty history or when the assistant spoke last;
    returns the whole history when no assistant message exists yet.
    """
    if not history or history[-1]["role"] == "assistant":
        return None
    # Scan backwards for the most recent assistant message.
    for idx in reversed(range(len(history))):
        if history[idx]["role"] == "assistant":
            return history[idx + 1:]
    # No assistant message at all: everything is pending user input.
    return history


def submit_messages(history):
    """Build the pending user turn into model input and append the bot reply.

    Text and image parts go through the LangChain history chain; audio and
    video parts short-circuit to the raw DashScope client (callTongYillm),
    which supports streamed audio output.
    """
    user_messages = get_last_user_after_assistant(history)
    print(user_messages)
    content = []
    if user_messages:
        for entry in user_messages:
            part = entry['content']
            if isinstance(part, str):  # plain text
                content.append({"type": "text", "text": part})
            elif isinstance(part, tuple):  # multimedia upload (gradio file tuple)
                file_path = part[0]  # local path of the uploaded file
                if file_path.endswith('.wav'):
                    content.append(transcribe_audio(file_path))
                    return callTongYillm({"role": "user", "content": content}, history)
                elif file_path.endswith(('.mp3', '.mp4')):
                    content.append(transcribe_video(file_path))
                    return callTongYillm({"role": "user", "content": content}, history)
                elif file_path.endswith(('.jpg', '.png', '.jpeg')):
                    # BUG FIX: the append used to sit outside this branch, so any
                    # unsupported extension hit an undefined `file_message`
                    # (NameError). Unsupported types are now skipped instead.
                    content.append(transcribe_image(file_path))
            # Other content shapes are ignored — presumably gradio delivers file
            # content as tuples here; TODO confirm against the Chatbot component.

    # Robustness: nothing to send (e.g. assistant already replied) — leave
    # history untouched instead of invoking the LLM with empty content.
    if not content:
        return history

    user_message = HumanMessage(content=content)
    resp = chat_history.invoke({'messages': [user_message]}, config=config)
    history.append({"role": "assistant", "content": resp.content})
    return history


def callTongYillm(input_message: dict, history: list):
    """Stream a reply from qwen3-omni-flash and append it to *history*.

    FIX: parameter annotations corrected — *input_message* is a chat-message
    dict ({"role": ..., "content": [...]}) and *history* a list of message
    dicts; both were mislabeled as ``str``.
    """
    completion = client.chat.completions.create(
        model="qwen3-omni-flash",  # Qwen3-Omni-Flash must run in non-thinking mode
        messages=[input_message],
        # Output modalities currently supported: ["text", "audio"] or ["text"].
        modalities=["text", "audio"],
        audio={"voice": "Cherry", "format": "wav"},
        # stream must be True for this model, otherwise the API errors out.
        stream=True,
        stream_options={"include_usage": True},
    )
    chunks = []
    for chunk in completion:
        # Usage-only chunks arrive with an empty choices list and are skipped.
        if chunk.choices:
            delta = chunk.choices[0].delta.content
            print(delta)
            if delta:
                chunks.append(delta)
    if chunks:
        history.append({"role": "assistant", "content": ''.join(chunks)})
    return history


def transcribe_audio(audio_path):
    """Package a local audio file as a base64 `input_audio` message part.

    The multimodal API accepts two ways of passing media:
    1. a base64 data URL (for local files, as done here);
    2. a public http(s) URL on an external server.

    Returns {} on a read failure so callers can proceed best-effort.
    """
    try:
        with open(audio_path, "rb") as audio_file:
            audio_data = base64.b64encode(audio_file.read()).decode("utf-8")
    except OSError as e:
        # Narrowed from a bare `except Exception`: only file I/O can fail here.
        print(f"Error reading audio file: {e}")
        return {}
    return {  # wrap the audio payload as one typed message part
        "type": "input_audio",
        "input_audio": {
            "data": f"data:;base64,{audio_data}",
            "format": "wav"
        },
    }


def transcribe_video(video_path):
    """Package a local video file as a base64 `video_url` message part.

    The multimodal API accepts two ways of passing media:
    1. a base64 data URL (for local files, as done here);
    2. a public http(s) URL on an external server.

    Returns {} on a read failure so callers can proceed best-effort.
    """
    try:
        with open(video_path, "rb") as video_file:
            video_data = base64.b64encode(video_file.read()).decode("utf-8")
    except OSError as e:
        # FIX: the message said "audio file" (copy-paste from transcribe_audio);
        # also narrowed from a bare `except Exception` to file I/O errors.
        print(f"Error reading video file: {e}")
        return {}
    return {  # wrap the video payload as one typed message part
        "type": "video_url",
        "video_url": {
            "url": f"data:;base64,{video_data}",
        },
    }


def transcribe_image(image_path):
    """Convert an image of any supported format into a base64 data URL.

    :param image_path: path to the image file
    :return: `image_url` message-part dict with the base64 data URL
             (detail fixed to 'low')
    """
    with Image.open(image_path) as img:
        # Fall back to JPEG when PIL cannot determine the source format.
        fmt = img.format or 'JPEG'
        buffer = io.BytesIO()
        img.save(buffer, format=fmt)
        encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")

    return {
        "type": "image_url",
        "image_url": {
            "url": f"data:image/{fmt.lower()};base64,{encoded}",
            "detail": 'low'
        }
    }


# Gradio UI: a message-style chatbot plus a multimodal input box.
with gr.Blocks(title="多聊天机器人", theme=gr.themes.Soft()) as block:
    chatbot = gr.Chatbot(type='messages', height=500, label="Chat with Bot")

    user_input = gr.MultimodalTextbox(
        interactive=True,
        file_types=['image', '.wav', '.mp4', 'video'],
        file_count="multiple",
        placeholder="Type a message or upload a file (image, audio, video)...",
        show_label=False,  # hide the label
        sources=["microphone", "upload"]  # show only microphone and upload buttons
    )

    # Enter submits: first record the user's message, then generate the reply.
    chat_message = user_input.submit(add_message, [chatbot, user_input], [chatbot, user_input])  # user presses Enter to submit
    chat_message.then(submit_messages, [chatbot], [chatbot])
    # .then(lambda: gr.MultimodalTextbox(interactive=True),
    #                                                             # anonymous function to reset the input box
    #                                                             None,
    #                                                             [user_input]  # output to the input box
    #                                                             )

# NOTE(review): Qwen3 supports omni-modality; re-download the model if needed.
if __name__ == '__main__':
    block.launch()
