# Standard library
import hashlib
import io
import json
import os
import pickle

# Third-party
import cv2
import dashscope
import easyocr
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyttsx3
import speech_recognition as sr
import streamlit as st
import torch
import torch.nn as nn
from PIL import Image
from dashscope import Generation
from facenet_pytorch import MTCNN, InceptionResnetV1
from googletrans import Translator
from torchvision import models
from torchvision.transforms import Compose, Resize, ToTensor, Normalize, ColorJitter, RandomRotation, \
    RandomHorizontalFlip

# Local
from LeNet import LeNet5
from 数据集 import mnist10_labels

# Use SimHei so matplotlib can render Chinese axis labels and titles
plt.rcParams["font.sans-serif"] = ["SimHei"]
# Keep minus signs rendering correctly when a CJK font is active
plt.rcParams["axes.unicode_minus"] = False

# Path configuration
DATA_PATH = 'images'       # face-embedding database directory
SAVE_DIR = DATA_PATH
AUDIO_DIR = 'audio'        # generated TTS audio files
# exist_ok=True avoids the check-then-create race of exists() + makedirs()
os.makedirs(SAVE_DIR, exist_ok=True)
os.makedirs(AUDIO_DIR, exist_ok=True)

# Face-recognition model initialisation.
# Either object may end up as None when construction fails; downstream
# callers must check before use.
mtcnn = None
resnet = None

try:
    # keep_all=False: extraction keeps only the highest-confidence face
    mtcnn = MTCNN(keep_all=False)
except Exception as e:
    print(f'创建mtcnn实例对象失败：{e}')
    st.error(f'模型初始化失败: {e}')

try:
    # VGGFace2-pretrained InceptionResnetV1 produces the face embeddings
    # used for similarity comparison; eval() disables training behaviour
    resnet = InceptionResnetV1(pretrained='vggface2').eval()
except Exception as e:
    print(f'创建模型失败:{e}')
    st.error(f'模型初始化失败: {e}')

# Load previously enrolled face embeddings (pickled <name>.embed files)
# into an in-memory lookup: name -> embedding tensor.
face_dict = {}
for file_name in os.listdir(DATA_PATH):
    if not file_name.endswith('.embed'):
        continue
    face_name, _ = os.path.splitext(file_name)
    try:
        with open(os.path.join(DATA_PATH, file_name), 'rb') as fh:
            face_dict[face_name] = pickle.load(fh)
    except Exception as e:
        print(f'加载人脸特征失败: {face_name}, 错误: {e}')

# Model creation: load trained parameters and switch to evaluation mode.
# LeNet model (custom LeNet5 from the local LeNet module)
model_1 = LeNet5()
# map_location=cpu lets the app run on machines without a GPU
state_dict = torch.load('model/best_lenet_model_45class.pth', map_location=torch.device('cpu'))
model_1.load_state_dict(state_dict)
model_1.eval()


# AlexNet model
@st.cache_resource
def load_alexnet(num_classes=45):
    """Build an AlexNet with a num_classes-way final layer and restore the
    fine-tuned weights on CPU. Cached so the model is built only once."""
    net = models.alexnet(weights=None)
    # replace the final classifier layer with a num_classes-way head
    head_in = net.classifier[6].in_features
    net.classifier[6] = nn.Linear(head_in, num_classes)
    weights = torch.load('model/best_alexnet_model_45class.pth',
                         map_location=torch.device('cpu'))
    net.load_state_dict(weights)
    net.eval()
    return net


# Instantiate once at import time
model_2 = load_alexnet()


# VGG16 model
@st.cache_resource
def load_VGG16(num_classes=45):
    """Build a VGG16 with a num_classes-way final layer and restore the
    fine-tuned weights on CPU. Cached so the model is built only once."""
    net = models.vgg16()
    # replace the final classifier layer with a num_classes-way head
    net.classifier[6] = nn.Linear(net.classifier[6].in_features, num_classes)
    net.load_state_dict(torch.load('model/best_vgg16_model_45class.pth',
                                   map_location=torch.device('cpu')))
    net.eval()
    return net


model_3 = load_VGG16()


# GoogLeNet model
@st.cache_resource
def load_GoogLeNet(num_classes=45):
    """Build a GoogLeNet with a num_classes-way fc layer and restore the
    fine-tuned weights on CPU. Cached so the model is built only once.

    NOTE(review): weights='IMAGENET1K_V1' downloads ImageNet weights that
    are immediately overwritten by load_state_dict, but it also fixes
    constructor flags (aux_logits/transform_input) that the checkpoint may
    depend on — confirm against the training script before simplifying.
    """
    net = models.googlenet(weights='IMAGENET1K_V1')
    net.fc = nn.Linear(net.fc.in_features, num_classes)
    net.load_state_dict(torch.load('model/best_googlenet_model.pth',
                                   map_location=torch.device('cpu')))
    net.eval()
    return net


model_4 = load_GoogLeNet()


# Prediction helpers: each test_i preprocesses one image and runs one of
# the four models, returning raw logits of shape (1, num_classes).
@st.cache_resource
def test_1(file_path):
    """Run the LeNet model on one image.

    Args:
        file_path: path or file-like object accepted by PIL.Image.open.

    Returns:
        Raw (un-softmaxed) logits tensor from model_1.
    """
    # convert('RGB') guards against RGBA/grayscale uploads, which would
    # give the wrong channel count for the 3-channel Normalize below
    image = Image.open(file_path).convert('RGB')
    # The original pipeline applied RandomHorizontalFlip/RandomRotation/
    # ColorJitter here — training-time augmentations that made inference
    # non-deterministic. They are removed for prediction.
    img = Compose([
        Resize((224, 224)),
        ToTensor(),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])(image)
    with torch.no_grad():
        y_pre = model_1(img.unsqueeze(0))
    return y_pre


@st.cache_resource
def test_2(file_path):
    """Run the AlexNet model on one image; returns raw logits (1, num_classes)."""
    # convert('RGB') guards against RGBA/grayscale uploads, which would
    # break the 3-channel Normalize below
    image = Image.open(file_path).convert('RGB')
    img = Compose([
        Resize((224, 224)),
        ToTensor(),
        # ImageNet mean/std, the standard torchvision preprocessing
        Normalize([0.485, 0.456, 0.406],
                  [0.229, 0.224, 0.225])
    ])(image)
    with torch.no_grad():
        y_pre = model_2(img.unsqueeze(0))
    return y_pre


@st.cache_resource
def test_3(file_path):
    """Run the VGG16 model on one image; returns raw logits (1, num_classes)."""
    # convert('RGB') guards against RGBA/grayscale uploads, which would
    # break the 3-channel Normalize below
    image = Image.open(file_path).convert('RGB')
    img = Compose([
        Resize((224, 224)),
        ToTensor(),
        # ImageNet mean/std, the standard torchvision preprocessing
        Normalize([0.485, 0.456, 0.406],
                  [0.229, 0.224, 0.225])
    ])(image)
    with torch.no_grad():
        y_pre = model_3(img.unsqueeze(0))
    return y_pre


@st.cache_resource
def test_4(file_path):
    """Run the GoogLeNet model on one image; returns raw logits (1, num_classes)."""
    # convert('RGB') guards against RGBA/grayscale uploads
    image = Image.open(file_path).convert('RGB')
    # NOTE(review): no Normalize here, unlike the other pipelines — if the
    # checkpoint was trained with ImageNet normalisation this hurts
    # accuracy; confirm against the training script.
    img = Compose([
        Resize((224, 224)),
        ToTensor()
    ])(image)
    with torch.no_grad():
        y_pre = model_4(img.unsqueeze(0))
    return y_pre


# Clear the chat history
def clear_history():
    """Reset the conversation history stored in the Streamlit session."""
    st.session_state['history'] = []


# Model-architecture comparison charts.
def draw_module_comparison():
    """Render side-by-side graphviz sketches of an Inception module and a
    ResNet residual block.

    The @st.cache_resource decorator was removed: this function returns
    None and only has UI side effects, so caching it made Streamlit skip
    the body (rendering nothing) on every call after the first.
    """
    col1, col2 = st.columns(2)
    graph_1 = graphviz.Digraph()
    graph_2 = graphviz.Digraph()
    # Inception module: parallel branches merged by concatenation
    with col1:
        st.subheader("Inception 模块结构图")
        graph_1.edge("run", "输入")
        graph_1.edge("输入", "1×1 Conv")
        graph_1.edge("1×1 Conv", "3×3 Conv")
        graph_1.edge("3×3 Conv", "5×5 Conv")
        graph_1.edge("5×5 Conv", "3×3 MaxPool")
        graph_1.edge("3×3 MaxPool", "Concat")
        graph_1.edge("Concat", "输出")
        st.graphviz_chart(graph_1)

    # ResNet residual connection
    with col2:
        st.subheader("ResNet 残差连接结构图")
        graph_2.edge("run", "输入")
        graph_2.edge("输入", "卷积层1")
        graph_2.edge("卷积层1", "卷积层2")
        graph_2.edge("卷积层2", "输出")
        st.graphviz_chart(graph_2)


# DashScope API key. Hard-coding a secret in source is a security risk —
# read from the DASHSCOPE_API_KEY environment variable; the literal is
# kept only as a backward-compatible fallback and should be rotated.
dashscope.api_key = os.getenv('DASHSCOPE_API_KEY', 'sk-90b94635a65942c3a902ffebff6c3ce6')


# Text generation via the Qwen2.5 large language model
@st.cache_resource
def text_generation(input_text):
    """Send input_text to qwen2.5-1.5b-instruct and return the generated
    text, or None when the API call does not succeed."""
    response = Generation.call(
        model='qwen2.5-1.5b-instruct',
        prompt=input_text
    )
    # HTTP 200 means success; any other status yields None
    return response.output.text if response.status_code == 200 else None


def speak_text_with_progress(text, play_audio=True, rate=120):
    """Synthesize text to a WAV file with pyttsx3 and optionally play it.

    Args:
        text: text to speak; falsy text is a no-op.
        play_audio: when True, embed an audio player in the page.
        rate: speech rate passed to the TTS engine.

    Returns:
        Path of the cached WAV file, or None on empty text / synth failure.
    """
    if not text:
        return None
    # Cache file keyed on content + rate. md5 replaces the builtin hash():
    # str hashes are salted per process (PYTHONHASHSEED), so hash() made
    # the on-disk cache useless across app restarts.
    digest = hashlib.md5(text.encode('utf-8')).hexdigest()
    audio_file = os.path.join(AUDIO_DIR, f"speech_{digest}_{rate}.wav")

    # Synthesize only when not already cached on disk
    if not os.path.exists(audio_file):
        try:
            engine = pyttsx3.init()
            engine.setProperty('rate', rate)
            engine.save_to_file(text, audio_file)
            engine.runAndWait()
            engine.stop()
        except Exception as e:
            st.error(f"语音合成失败: {e}")
            return None
    # st.audio renders a player with its own progress bar
    if play_audio:
        with open(audio_file, "rb") as f:
            audio_bytes = f.read()
        st.audio(audio_bytes, format="audio/wav")
    return audio_file


# OCR text extraction
def ocr(_image):
    """Extract text from a PIL image.

    Pipeline: grayscale -> Otsu binarisation -> Gaussian blur ->
    morphological close -> largest contour; if that contour approximates a
    quadrilateral, the image is perspective-corrected before EasyOCR runs.

    Returns:
        Recognised text, one detected fragment per line.
    """
    # PIL (RGB) -> OpenCV (BGR)
    img = cv2.cvtColor(np.array(_image), cv2.COLOR_RGB2BGR)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu chooses the binarisation threshold automatically
    _, img_binary = cv2.threshold(img_gray, 0, 255,
                                  cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    img_blur = cv2.GaussianBlur(img_binary, (5, 5), 7)
    # Morphological closing fills small gaps inside text regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    morph = cv2.morphologyEx(img_blur, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(morph,
                                   mode=cv2.RETR_LIST,
                                   method=cv2.CHAIN_APPROX_SIMPLE)

    image_np = img
    # Guard: a blank/uniform image yields no contours; the original code
    # indexed sorted(contours)[0] and raised IndexError in that case.
    if contours:
        # largest contour by area, approximated with a coarse polygon
        cnt = max(contours, key=cv2.contourArea)
        zc = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, zc * 0.05, True)
        cv2.drawContours(img, [approx], -1, (0, 0, 255), 1)
        # Perspective-correct only when the outline is a quadrilateral
        if len(approx) == 4:
            src = np.float32(approx).reshape(-1, 2)
            x = src[:, 0]
            y = src[:, 1]
            # NOTE(review): dst corner order is built from bounding
            # extremes and assumes approx's point order matches — verify
            # on rotated inputs.
            dst = np.float32([
                [min(x), min(y)],
                [min(x), max(y)],
                [max(x), max(y)],
                [max(x), min(y)]
            ])
            M = cv2.getPerspectiveTransform(src, dst)
            image_np = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
    reader = easyocr.Reader(
        ['ch_sim', 'en'],
        model_storage_directory=r'model/models',
        gpu=False,
        download_enabled=False  # offline: never download models
    )
    results = reader.readtext(image_np)
    # each result is (bbox, text, confidence); keep one fragment per line
    plate_text = "".join([res[1] + "\n" for res in results])
    return plate_text


# OCR text translation
def ocr_Translation(text):
    """Translate text to Simplified Chinese via googletrans.

    Returns:
        "翻译: <result>" on success, or a "翻译失败:..." message on error.
        (The original error path stored the message in an unused list and
        implicitly returned None, so callers displayed/spoke nothing.)
    """
    translator = Translator()
    try:
        translation = translator.translate(text, dest='zh-CN')
        return f"翻译: {translation.text}"
    except Exception as e:
        return f"翻译失败:{e}"


# OCR flow chart
def flow_chart():
    """Render the OCR pipeline (upload -> detect -> recognise -> output)
    as a simple graphviz flow chart."""
    st.subheader("OCR流程图")
    graph = graphviz.Digraph()
    steps = ["run", "上传图片", "文字检测", "字符识别", "文本输出", "结束"]
    # connect consecutive steps into a linear chain
    for src, dst in zip(steps, steps[1:]):
        graph.edge(src, dst)
    st.graphviz_chart(graph)

# Enroll a face into the database
def save_img(file_path, face_name):
    """Detect the highest-confidence face in the image at file_path,
    compute its embedding and persist it as <SAVE_DIR>/<face_name>.embed.

    Shows the annotated detection in the UI; warns when no face is found.
    """
    # Guard: model construction may have failed at import time, leaving
    # these as None — previously this crashed with AttributeError.
    if mtcnn is None or resnet is None:
        st.error('模型初始化失败，无法采集人脸信息')
        return
    image_pil = Image.open(file_path).convert('RGB')  # drop any alpha channel
    image = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)  # to cv2 BGR
    boxes, probs, points = mtcnn.detect(image, landmarks=True)
    if boxes is None:
        st.warning("未检测到人脸，请重试")
        return
    idx = probs.argmax()  # keep the highest-confidence face
    face_box = boxes[idx].astype(int)
    face_tensor = mtcnn.extract(image, boxes[idx].reshape(1, -1), None)
    face_embed = resnet(face_tensor.unsqueeze(0))
    with open(os.path.join(SAVE_DIR, f'{face_name}.embed'), 'wb') as f:
        pickle.dump(face_embed, f)
    # Draw the detection box and report back to the user (was needlessly
    # nested inside the `with open` block before)
    x1, y1, x2, y2 = face_box
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
    annotated_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    st.success(f'成功保存 {face_name} 的人脸信息')
    st.image(annotated_image, caption=f"检测到的人脸框：{face_box.tolist()}")
    st.write(f"人脸框坐标：{face_box.tolist()}")


# Compare a face against the enrolled database
def comparison_img(image):
    """Detect the highest-confidence face in `image` (path or PIL image),
    embed it and report the best cosine-similarity match from face_dict.

    A match requires similarity > 0.6; otherwise the user is told the
    face is not enrolled. Results are announced via TTS.
    """
    # Guard: model construction may have failed at import time
    if mtcnn is None or resnet is None:
        st.error('模型初始化失败，无法进行人脸比对')
        return
    if isinstance(image, str):
        # a path was passed: open the image file
        image_pil = Image.open(image).convert('RGB')
    else:
        # already a PIL image
        image_pil = image.convert('RGB')
    image = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)
    boxes, probs, points = mtcnn.detect(image, landmarks=True)

    if boxes is None or len(boxes) == 0:
        st.warning("未检测到人脸")
        return
    idx = np.argmax(probs)
    face_tensor = mtcnn.extract(image, boxes[idx].reshape(1, -1), None)
    if face_tensor is None:
        st.error("无法提取人脸特征")
        return
    face_embed = resnet(face_tensor.unsqueeze(0))
    # Linear scan of the database for the highest cosine similarity
    max_score = float('-inf')
    best_name = None
    for name, ds_face in face_dict.items():
        score = nn.functional.cosine_similarity(face_embed, ds_face).item()
        if score > max_score:
            max_score = score
            best_name = name

    # 0.6 similarity threshold; an empty database falls through to the
    # "not enrolled" branch since max_score stays at -inf
    if best_name is not None and max_score > 0.6:
        st.success(f"识别结果: {best_name} (相似度: {max_score:.2f})")
        speak_text_with_progress(f"识别结果为{best_name}")
    else:
        st.error('信息未采集，不在数据库中，请尽快添加到数据库')
        speak_text_with_progress("信息未采集，不在数据库中，请尽快添加到数据库")


def main():
    """Streamlit entry point: chat UI plus image classification, OCR,
    translation, speech and face-recognition tools."""
    # Initialise session-state keys on first run
    if 'uploaded_image' not in st.session_state:
        st.session_state.uploaded_image = None
    if 'uploaded_file' not in st.session_state:
        # raw uploaded file object; previously never initialised, so the
        # face-enrollment button crashed when clicked before any upload
        st.session_state.uploaded_file = None
    if 'recognize_clicked' not in st.session_state:
        st.session_state.recognize_clicked = False
    if 'ocr_identify' not in st.session_state:
        st.session_state.ocr_identify = False
    if 'ocr_translation' not in st.session_state:
        st.session_state.ocr_translation = False
    if 'contrast_voice' not in st.session_state:
        st.session_state.contrast_voice = False
    if "ocr_result" not in st.session_state:
        st.session_state.ocr_result = None
    if 'history' not in st.session_state:
        st.session_state.history = []

    st.title('AI多模态智能交互平台')
    st.write('请上传一张图片进行识别,支持多轮交互~~~')
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.button('模型架构差异', on_click=draw_module_comparison)
    with col2:
        st.button('OCR流程图', on_click=flow_chart)
    with col3:
        if st.button('语音识别效果对比'):
            st.session_state.contrast_voice = True
    with col4:
        if st.button('OCR识别翻译'):
            st.session_state.ocr_translation = True

    prompt = st.chat_input("上传图片或输入文字", accept_file=True, file_type=['jpg', 'jpeg', 'png'])

    if prompt:
        if prompt.text:
            # Text prompt -> LLM response, spoken aloud
            st.session_state.history.append({'role': 'user', 'content': prompt.text})
            response_text = text_generation(prompt.text)
            st.session_state.history.append({'role': 'ai', 'content': response_text})
            speak_text_with_progress(response_text)

        if prompt.files and len(prompt.files) > 0:
            # Remember both the raw file (for enrollment) and the decoded image
            st.session_state.uploaded_file = prompt.files[0]
            image = Image.open(prompt.files[0]).convert("RGB")
            st.session_state.uploaded_image = image
            st.session_state.history.append({'role': 'user', 'content': image})

    # Sidebar actions
    with st.sidebar:
        st.title('侧边栏')
        face_name = st.text_input('请输入录入人脸的姓名：')
        st.button('清空历史信息', on_click=clear_history)

        if st.button("图像分类"):
            st.session_state.recognize_clicked = True

        if st.button('语音输入'):
            st.session_state.trigger_voice = True

        if st.button('文档助手'):
            st.session_state.ocr_identify = True

        if st.button('采集信息'):
            if face_name.strip() == '':
                st.warning('请输入有效的姓名')
            elif st.session_state.uploaded_file is None:
                st.warning("请先上传或拍摄一张人脸照片")
            else:
                save_img(st.session_state.uploaded_file, face_name)
        if st.button('比对'):
            if st.session_state.uploaded_image is not None:
                comparison_img(st.session_state.uploaded_image)
            else:
                st.warning("请先上传或拍摄一张人脸照片")

    # Replay the chat history
    for msg in st.session_state.history:
        with st.chat_message(msg['role']):
            if isinstance(msg['content'], Image.Image):
                st.image(msg['content'], caption='用户上传的图片')
            else:
                st.text(msg['content'])

    # Voice input: record -> Whisper transcription -> LLM -> TTS
    if st.session_state.get('trigger_voice', False):
        # 1. create the recogniser
        r = sr.Recognizer()
        # 2. record from the microphone
        with sr.Microphone() as source:
            st.write('开始录音...')
            audio = r.listen(source)
        # 3. transcribe the recording
        text = None  # stays None when transcription fails
        try:
            text = r.recognize_whisper(audio, language='chinese')
            st.write(f'识别结果是:{text}')
            st.session_state.history.append({'role': 'user', 'content': text})
        except Exception as e:
            st.write(f'无法识别错误，遇见错误是{e}')
        if text:
            # Bug fix: the whole transcription is the prompt. The old code
            # did `user_msgs = text; user_msgs[-1]`, which sent only the
            # LAST CHARACTER of the transcription to the model (and raised
            # NameError when recognition failed).
            response_text = text_generation(text)
            st.session_state.generated_text = response_text
            st.session_state.history.append({'role': 'ai', 'content': response_text})
            st.write(response_text)
            speak_text_with_progress(response_text)
        # reset the trigger after handling
        st.session_state.trigger_voice = False

    # Image classification: run all four models on the uploaded image
    if st.session_state.recognize_clicked and st.session_state.uploaded_image:
        temp_buffer = io.BytesIO()
        st.session_state.uploaded_image.save(temp_buffer, format='PNG')
        temp_buffer.seek(0)
        models_result = {}
        model_funcs = {
            'LeNet': test_1,
            'AlexNet': test_2,
            'VGGNet': test_3,
            'GoogLeNet': test_4
        }
        cols = st.columns(2)  # two-column layout
        for i, (model_name, test_func) in enumerate(model_funcs.items()):
            y_pred = test_func(temp_buffer)
            temp_buffer.seek(0)  # rewind for the next model
            probs = nn.functional.softmax(y_pred, dim=1).cpu().detach().numpy().flatten()
            top5_idx = probs.argsort()[-5:][::-1]
            top5_probs = probs[top5_idx]
            top5_labels = [mnist10_labels[j] for j in top5_idx]
            models_result[model_name] = {
                "Top-1": f"{top5_labels[0]} ({float(top5_probs[0]):.4f})",
                "Top-5": [(label, float(prob)) for label, prob in zip(top5_labels, top5_probs)]
            }
            with cols[i % 2]:
                fig, ax = plt.subplots(figsize=(4, 3))
                ax.barh(range(5), top5_probs[::-1], color='skyblue')
                ax.set_yticks(range(5))
                ax.set_yticklabels(top5_labels[::-1])
                ax.set_xlabel("预测概率")
                ax.set_title(f"{model_name} Top-5 预测", pad=20)
                ax.text(0.5, 1.30, f"Top-1: {models_result[model_name]['Top-1']}", ha='center', va='bottom',
                        transform=ax.transAxes, fontsize=10, fontweight='bold')
                st.pyplot(fig)
        with st.chat_message('assistant'):
            result_summary = "\n".join([f"{k} 预测: 这是{v['Top-1'].split(' ')[0]}" for k, v in models_result.items()])
            st.text(result_summary)
            speak_text_with_progress(result_summary)
        # Downloadable prediction reports
        report_text = "\n".join([f"{k}: {v['Top-1']}" for k, v in models_result.items()])
        report_json = json.dumps(models_result, ensure_ascii=False, indent=2)
        st.download_button("下载 TXT 报告", report_text, file_name="prediction_report.txt")
        st.download_button("下载 JSON 报告", report_json, file_name="prediction_report.json")
        st.session_state.recognize_clicked = False

    # OCR recognition of the uploaded image
    if st.session_state.ocr_identify and st.session_state.uploaded_image:
        image = st.session_state.uploaded_image
        ocr_text = ocr(image)
        st.session_state.ocr_result = ocr_text
        st.write('OCR识别结果')
        st.text(ocr_text)
        speak_text_with_progress(ocr_text)
        st.session_state.ocr_identify = False

    # Translation of the last OCR result
    if st.session_state.ocr_translation:
        if st.session_state.ocr_result:
            translation_text = ocr_Translation(st.session_state.ocr_result)
            st.write('OCR翻译结果')
            st.text(translation_text)
            speak_text_with_progress(translation_text)
        else:
            st.warning('OCR未识别 无法翻译')
        st.session_state.ocr_translation = False

    # Static comparison table of the two TTS back-ends
    if st.session_state.contrast_voice:
        data = {
            '属性': ['音色自然度', '语音流畅性', '多音字处理', '支持语种', '语速控制', '声音个性化'],
            'pyttsx3（本地）': [
                '较生硬，依赖系统 TTS 引擎',
                '一般，有卡顿感',
                '支持有限，依赖系统',
                '取决于系统支持的语音包',
                '支持语速调节',
                '不支持个性化音色'
            ],
            'dashscope（阿里）': [
                '高度自然，接近真人',
                '流畅连续，无明显停顿',
                '准确处理多音字，适配上下文',
                '支持中英等多语种',
                '精准语速调节',
                '支持多种音色和风格'
            ]
        }
        df = pd.DataFrame(data)
        st.markdown("### pyttsx3 与 dashscope 语音合成对比")
        st.table(df)
        st.session_state.contrast_voice = False


# Entry point when executed directly (e.g. `streamlit run <this file>`)
if __name__ == '__main__':
    main()
