import io
import os
import tempfile
import platform
import pyttsx3
from pydub import AudioSegment
import torch
from torch import nn
from torchvision.models import GoogLeNet_Weights
import torchvision.models as models
from torchvision.transforms import Compose, ToTensor, Resize
import matplotlib.pyplot as plt
import streamlit as st
from PIL import Image
import dashscope
from dashscope import Generation
from dashscope.audio.tts import SpeechSynthesizer, ResultCallback, SpeechSynthesisResult

# Configure the DashScope API key.
# SECURITY: avoid committing secrets to source control — prefer the
# DASHSCOPE_API_KEY environment variable; the literal remains only as a
# backward-compatible fallback for existing deployments.
dashscope.api_key = os.environ.get("DASHSCOPE_API_KEY", "sk-ec38daeb0ceb42c1a580a70316c64717")


# Query the Qwen 2.5 LLM through DashScope.
def qw_generation(input_text):
    """Send *input_text* to qwen2.5-1.5b-instruct and return the reply text.

    Returns a fixed Chinese apology message when the API call does not
    succeed (non-200 status code).
    """
    result = Generation.call(model="qwen2.5-1.5b-instruct", prompt=input_text)
    if result.status_code != 200:  # request failed
        return "抱歉，无法获取模型响应"
    return result.output.text


# Initialize the base GoogLeNet (ImageNet weights) on CPU. Its final fc
# layer is later replaced and reloaded with fine-tuned 45-class weights
# inside main() when an image is uploaded.
model_GoogLeNet = models.googlenet(weights=GoogLeNet_Weights.DEFAULT).to('cpu')
plt.rcParams["font.sans-serif"] = ["SimHei"]  # enable Chinese glyph rendering in matplotlib
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with SimHei


class MyCallback(ResultCallback):
    """DashScope streaming-TTS callback: buffers PCM frames, converts to WAV.

    On completion the WAV path is published to Streamlit session state
    (``dashscope_audio_file``) together with a human-readable status.
    """

    def __init__(self):
        # Raw PCM frames received from the synthesizer, in arrival order.
        self.audio_frames = []
        # Path of the temporary .pcm file written in on_complete().
        self.temp_audio_path = None

    def on_open(self):
        print("语音合成器已打开")

    def on_event(self, result: SpeechSynthesisResult):
        # Fetch the frame once instead of calling the getter three times.
        frame = result.get_audio_frame()
        if frame is not None:
            self.audio_frames.append(frame)
            print(f"音频帧长度: {len(frame)}")
        if result.get_timestamp() is not None:
            print(f"时间戳: {result.get_timestamp()}")

    def on_complete(self):
        print("语音合成完成")
        if not self.audio_frames:
            return

        # Persist the raw PCM to a temp file (kept on disk; delete=False).
        pcm_data = b"".join(self.audio_frames)
        with tempfile.NamedTemporaryFile(mode='wb', suffix='.pcm', delete=False) as f:
            f.write(pcm_data)
            self.temp_audio_path = f.name

        # Convert 16-bit mono 48 kHz PCM to WAV. Reusing pcm_data fixes the
        # original's leaked file handle (open(path, 'rb').read() without close).
        try:
            audio = AudioSegment(
                data=pcm_data,
                sample_width=2,   # 16-bit samples
                frame_rate=48000,  # must match the sample_rate passed to the synthesizer
                channels=1
            )
            wav_path = self.temp_audio_path.replace('.pcm', '.wav')
            audio.export(wav_path, format='wav')
            st.session_state.dashscope_audio_file = wav_path
            st.session_state.tts_status = "语音生成成功"

            # Show an inline audio player when running under Streamlit.
            if getattr(st, '_is_running', False) or os.environ.get('STREAMLIT_SERVER_PORT'):
                st.audio(wav_path, format="audio/wav", autoplay=True)
        except Exception as e:
            print(f"音频转换失败: {e}")
            st.session_state.tts_status = f"音频转换失败: {e}"

    def on_error(self, error):
        print(f"合成出错: {error}")
        st.session_state.tts_status = f"合成错误: {error}"


def dashscope_text_to_speech(text):
    """Start streaming Sambert speech synthesis for *text*.

    Audio frames are delivered asynchronously to a MyCallback instance,
    which converts them to WAV and updates Streamlit session state.
    Shows a warning and returns early when *text* is empty.
    """
    if not text:
        st.warning("没有可播报的文本")
        return

    try:
        st.session_state.tts_status = "正在生成语音..."
        handler = MyCallback()
        SpeechSynthesizer().call(
            model="sambert-zhinan-v1",
            text=text,
            sample_rate=48000,  # must match MyCallback's WAV conversion rate
            format="pcm",
            callback=handler,
        )
    except Exception as e:
        st.error(f"语音合成失败: {str(e)}")
        st.session_state.tts_status = f"合成错误: {str(e)}"


def test_GoogLeNet(file_path):
    """Classify an image with the module-level GoogLeNet model.

    Args:
        file_path: Path or file-like object accepted by PIL.Image.open.

    Returns:
        (predicted_class_index, preprocessed_image_tensor).
    """
    # Force 3-channel RGB: PNG uploads are allowed (see the uploader), and a
    # grayscale or RGBA image would otherwise produce a tensor with the wrong
    # channel count for the 3-channel conv stem.
    image = Image.open(file_path).convert("RGB")
    preprocess = Compose([
        Resize(224),  # shortest side -> 224 (non-square is fine: GoogLeNet uses adaptive pooling)
        ToTensor(),
    ])
    img = preprocess(image)
    with torch.no_grad():  # inference only — no gradients needed
        logits = model_GoogLeNet(img.unsqueeze(0))  # add batch dimension
    return logits.argmax(dim=-1).item(), img


def clear_history():
    """Reset chat history and cached audio artifacts in session state."""
    state = st.session_state
    state.history = []
    state.local_audio_file = None
    state.dashscope_audio_file = None
    state.tts_status = "就绪"


# Initialize the pyttsx3 offline TTS engine.
def init_engine():
    """Return a pyttsx3 engine configured to speak at 170 words/minute.

    NOTE: the original also called engine.getProperty('voices') and
    discarded the result; removed as dead code.
    """
    engine = pyttsx3.init()
    engine.setProperty('rate', 170)
    return engine


# Offline speech synthesis (pyttsx3 fallback path).
def text_to_speech(text):
    """Render *text* to an audio file with pyttsx3 and return its path.

    Returns None (with a Streamlit warning/error) when *text* is empty or
    synthesis fails. Also updates st.session_state.tts_status.
    """
    if not text:
        st.warning("没有可播报的文本")
        return None
    try:
        st.session_state.tts_status = "正在生成语音..."
        # tempfile.gettempdir() handles Windows and POSIX uniformly. The
        # original branched on platform.system() and fell back to '/tmp'
        # on Windows when TEMP/TMP were unset — an invalid path there.
        temp_dir = tempfile.gettempdir()

        # NOTE(review): hash(text) on str is randomized per process
        # (PYTHONHASHSEED), so the filename is not stable across runs —
        # acceptable for a throwaway temp file.
        local_audio_file = os.path.join(temp_dir, f"speech_{hash(text)}.mp3")

        # Initialize the engine and render the speech to disk.
        engine = init_engine()
        engine.save_to_file(text, local_audio_file)
        engine.runAndWait()  # blocks until the file is written

        st.session_state.tts_status = "语音生成成功"
        return local_audio_file
    except Exception as e:
        st.error(f"语音合成失败: {str(e)}")
        st.session_state.tts_status = f"合成错误: {str(e)}"
        return None


# Streamlit UI page.
def main():
    """Streamlit entry point: animal-photo classification + LLM chat with TTS.

    Flow: sidebar controls -> replay history -> image upload (classify +
    LLM description + DashScope TTS) -> free-text chat (LLM + TTS) ->
    optional local pyttsx3 playback of the latest assistant message.
    """
    # Index -> label map for the 45-class mammal classifier; labels mix
    # Chinese and English and must match the fine-tuned model's class order.
    mnist45_labels = {
        0: "大象", 1: "羊驼", 2: "耗牛", 3: "长嘴小怪兽", 4: "小狐狸",
        5: "armadillo", 6: "baboon", 7: "badger", 8: "blue_whale", 9: "brown_bear",
        10: "camel", 11: "dolphin", 12: "长颈鹿", 13: "groundhog", 14: "highland_cattle",
        15: "马", 16: "jackal", 17: "kangaroo", 18: "koala", 19: "manatee",
        20: "mongoose", 21: "mountain_goat", 22: "opossum", 23: "orangutan", 24: "otter",
        25: "polar_bear", 26: "porcupine", 27: "小浣熊", 28: "rhinoceros", 29: "sea_lion",
        30: "seal", 31: "snow_leopard", 32: "squirrel", 33: "suger_glider", 34: "tapir",
        35: "vampire_bat", 36: "vicuna", 37: "walrus", 38: "warthog", 39: "water_buffalo",
        40: "weasel", 41: "wildebeest", 42: "wombat", 43: "yak", 44: "zebra"
    }

    # Initialize session state on first run (Streamlit reruns this script
    # on every interaction, so each key must be guarded).
    if 'history' not in st.session_state:
        st.session_state.history = []
    if 'local_audio_file' not in st.session_state:
        st.session_state.local_audio_file = None
    if 'dashscope_audio_file' not in st.session_state:
        st.session_state.dashscope_audio_file = None
    if 'tts_status' not in st.session_state:
        st.session_state.tts_status = "就绪"

    # Sidebar controls.
    with st.sidebar:
        st.title("侧边栏")
        # NOTE(review): `task` is never read below — the model selector is
        # currently decorative.
        task = st.selectbox('选择使用的模型', ['千问大模型2.5', "待添加"])
        speak_button = st.button("语音介绍")
        st.button("清空历史信息", on_click=clear_history)
        st.info(f"语音状态: {st.session_state.tts_status}")

    # Main page header.
    st.title("智能相册")
    st.write("请上传动物图片或输入文本进行交互")

    # Replay the conversation history.
    for msg in st.session_state.history:
        with st.chat_message(msg['role']):
            if msg['role'] == 'user':
                # User messages may be an uploaded image or plain text.
                if isinstance(msg['content'], Image.Image):
                    st.image(msg['content'])
                else:
                    st.markdown(msg['content'])
            else:
                # Assistant messages are text.
                st.markdown(msg['content'])
            # Attach the latest DashScope audio if it still exists on disk.
            # NOTE(review): the same file is rendered for every message.
            if st.session_state.dashscope_audio_file and os.path.exists(st.session_state.dashscope_audio_file):
                st.audio(st.session_state.dashscope_audio_file, format="audio/wav")

    # 1. Image upload: classify, describe via LLM, then synthesize speech.
    uploaded_file = st.file_uploader("上传图片（jpg/jpeg/png）", type=['jpg', 'jpeg', 'png'])
    if uploaded_file:
        try:
            image = Image.open(uploaded_file)
            # Echo the uploaded image back to the user.
            with st.chat_message("user"):
                st.image(image, caption="用户上传的图片")
            st.session_state.history.append({"role": "user", "content": image})

            # Load the fine-tuned 45-class head into the global model.
            # NOTE(review): this reload happens on every upload/rerun; it
            # could be hoisted or cached (@st.cache_resource).
            state_dict = torch.load("static/GoogLeNet_mammals_best_model.pth", map_location=torch.device('cpu'))
            model_GoogLeNet.fc = nn.Linear(1024, 45)  # swap the head to 45 output classes
            model_GoogLeNet.load_state_dict(state_dict)
            model_GoogLeNet.eval()
            class_prediction, img = test_GoogLeNet(uploaded_file)
            ret = mnist45_labels.get(class_prediction, "未知动物")

            # Ask the LLM for a description of the predicted animal.
            response_text = qw_generation(f"请介绍一下{ret}这种动物")
            with st.chat_message("assistant"):
                st.markdown(f"这是一张{ret}的图片，{response_text}")
            st.session_state.history.append({"role": "assistant", "content": f"这是一张{ret}的图片，{response_text}"})

            # Synthesize speech for the latest assistant message.
            latest_msg = next((m for m in reversed(st.session_state.history) if m['role'] == 'assistant'), None)
            if latest_msg:
                dashscope_text_to_speech(latest_msg['content'])

        except Exception as e:
            error_msg = "识别失败，请检查图片格式或模型文件"
            st.session_state.history.append({"role": "assistant", "content": error_msg})
            st.error(f"错误: {str(e)}")

    # 2. Free-text chat input.
    text_prompt = st.chat_input("输入文本进行交互...")
    if text_prompt:
        # Echo the user's text.
        with st.chat_message("user"):
            st.markdown(text_prompt)
        st.session_state.history.append({"role": "user", "content": text_prompt})

        # Forward the prompt to the LLM and show the reply.
        response_text = qw_generation(text_prompt)
        with st.chat_message("assistant"):
            st.markdown(response_text)
        st.session_state.history.append({"role": "assistant", "content": response_text})

        # Synthesize speech for the latest assistant message.
        latest_msg = next((m for m in reversed(st.session_state.history) if m['role'] == 'assistant'), None)
        if latest_msg:
            dashscope_text_to_speech(latest_msg['content'])

    # Local (pyttsx3) playback of the most recent assistant message.
    if speak_button:
        if not st.session_state.history:
            st.warning("没有历史记录可供播报")
            return
        latest_message = next(
            (msg for msg in reversed(st.session_state.history) if msg['role'] == 'assistant'),
            None
        )
        if not latest_message:
            st.warning("没有可播报的内容")
            return
        text = latest_message['content']
        st.info(f"正在准备播报: {text[:30]}...")
        local_audio_file = text_to_speech(text)
        if local_audio_file and os.path.exists(local_audio_file):
            st.info("本地语音播放")
            st.session_state.local_audio_file = local_audio_file
            st.audio(local_audio_file, format="audio/mp3")
        else:
            st.error("无法播放音频文件，请检查文件路径是否正确")


# Script entry point (run via `streamlit run <this file>`).
if __name__ == "__main__":
    main()