import base64
import io
from datetime import datetime

import pandas as pd
import requests
import streamlit as st
from PIL import Image

from entity.face_types import ValidMetrics

# Page configuration (must be the first Streamlit call in the script).
st.set_page_config(
    page_title="媒体信息处理服务",
    page_icon="🎭",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Custom CSS classes used by the raw-HTML st.markdown() calls below.
st.markdown(
    """
<style>
    .main-header {
        font-size: 2.5rem;
        color: #1E88E5;
        text-align: center;
        margin-bottom: 1rem;
    }
    .sub-header {
        font-size: 1.8rem;
        color: #0D47A1;
        margin-top: 1rem;
        margin-bottom: 0.5rem;
    }
    .card {
        background-color: #f9f9f9;
        border-radius: 10px;
        padding: 20px;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
        margin-bottom: 20px;
    }
    .success-msg {
        color: #4CAF50;
        font-weight: bold;
    }
    .error-msg {
        color: #F44336;
        font-weight: bold;
    }
    .info-text {
        color: #555;
        font-size: 0.9rem;
    }
</style>
""",
    unsafe_allow_html=True,
)

# Base URL of the backend media-processing API.
# NOTE(review): hard-coded to localhost — presumably meant to be configurable
# (env var / st.secrets) for non-local deployments; confirm.
API_BASE_URL = "http://localhost:5555"


# Cache the health-check result for 5 minutes to avoid hitting the API on
# every Streamlit rerun.
@st.cache_data(ttl=300)
def get_health_status():
    """Query the backend ``/health`` endpoint.

    Returns:
        dict: the parsed JSON health payload on success, or a synthetic
        ``{"code": 500, ..., "data": {"status": "DOWN"}}`` payload on any
        failure, so callers can always index ``["code"]`` and
        ``["data"]["status"]`` safely.
    """
    try:
        # Bound the request so a dead/unreachable backend cannot hang the
        # whole page render (requests has no default timeout).
        response = requests.get(f"{API_BASE_URL}/health", timeout=5)
        return response.json()
    except Exception as e:
        # Deliberate broad catch: any failure is reported as DOWN rather
        # than crashing the UI.
        return {"code": 500, "message": str(e), "data": {"status": "DOWN"}}


# Utility helpers
def image_to_base64(image):
    """Convert a PIL image to a base64-encoded JPEG string.

    Args:
        image: a ``PIL.Image.Image`` instance, or ``None``.

    Returns:
        str | None: base64 text of the JPEG bytes, or ``None`` when no
        image was supplied.
    """
    if image is None:
        return None
    # JPEG cannot encode alpha or palette modes (e.g. RGBA/P from PNG
    # uploads, which the uploaders above accept) — convert to RGB first so
    # Image.save(format="JPEG") does not raise OSError.
    if image.mode not in ("RGB", "L"):
        image = image.convert("RGB")
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")


def file_to_base64(file):
    """Base64-encode the full contents of an uploaded file-like object.

    Returns ``None`` when no file was supplied; otherwise the ASCII base64
    text of ``file.getvalue()``.
    """
    return None if file is None else base64.b64encode(file.getvalue()).decode("utf-8")


def audio_to_base64(audio_file):
    """Base64-encode an audio file-like object.

    Consumes the object's read cursor; returns ``None`` when no file was
    supplied.
    """
    if audio_file is None:
        return None
    raw_bytes = audio_file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")


def display_json_result(result, success_msg="操作成功"):
    """Render an API JSON response in the Streamlit UI.

    On ``code == 200`` shows a success banner plus an expandable raw-JSON
    view and returns the payload's ``"data"``; otherwise shows the error
    message and returns ``None``.
    """
    if result["code"] != 200:
        st.error(f"错误: {result['message']}")
        return None
    st.success(success_msg)
    with st.expander("查看详细结果"):
        st.json(result)
    return result["data"]


# 侧边栏导航
st.sidebar.markdown(
    '<div class="main-header">媒体信息处理服务</div>', unsafe_allow_html=True
)
st.sidebar.image("static/images/logo.png", width=150, use_column_width=False)

# 获取健康状态
health_status = get_health_status()
if health_status["code"] == 200 and health_status["data"]["status"] == "UP":
    st.sidebar.success("✅ 服务状态: 正常")
else:
    st.sidebar.error("❌ 服务状态: 异常")

# 导航菜单
nav_options = ["首页", "人脸特征提取", "人脸匹配", "语音识别", "视频处理", "系统状态"]
selected_nav = st.sidebar.radio("功能导航", nav_options)

# 页脚
st.sidebar.markdown("---")
st.sidebar.markdown(
    f"<div class='info-text'>版本: {health_status.get('data', {}).get('version', '未知')}<br>"
    f"© {datetime.now().year} 媒体信息处理服务</div>",
    unsafe_allow_html=True,
)

# --- Home page: feature overview cards and quick-start guide ---
if selected_nav == "首页":
    st.markdown(
        '<div class="main-header">欢迎使用媒体信息处理服务</div>',
        unsafe_allow_html=True,
    )

    col1, col2, col3 = st.columns(3)

    with col1:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.image("static/images/face.png", width=100)
        st.markdown("### 人脸处理")
        st.markdown("提取人脸特征、人脸匹配等功能")
        st.button(
            "前往人脸特征提取",
            # NOTE(review): this writes st.session_state["nav"], but page
            # selection is driven by the sidebar radio, which never reads
            # "nav" — these buttons likely do not actually switch pages.
            # TODO confirm and wire the radio to session state if intended.
            on_click=lambda: st.session_state.update({"nav": "人脸特征提取"}),
        )
        st.markdown("</div>", unsafe_allow_html=True)

    with col2:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.image("static/images/audio.png", width=100)
        st.markdown("### 语音识别")
        st.markdown("将语音转换为文本，支持多种语言")
        st.button(
            "前往语音识别",
            on_click=lambda: st.session_state.update({"nav": "语音识别"}),
        )
        st.markdown("</div>", unsafe_allow_html=True)

    with col3:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.image("static/images/video.png", width=100)
        st.markdown("### 视频处理")
        st.markdown("视频帧提取、视频文字识别等功能")
        st.button(
            "前往视频处理",
            on_click=lambda: st.session_state.update({"nav": "视频处理"}),
        )
        st.markdown("</div>", unsafe_allow_html=True)

    st.markdown("---")
    st.markdown('<div class="sub-header">快速开始</div>', unsafe_allow_html=True)

    st.markdown("""
    1. 从左侧导航栏选择需要使用的功能
    2. 上传相应的媒体文件（图片、音频或视频）
    3. 设置所需参数
    4. 点击执行按钮，查看处理结果
    
    所有功能均通过API调用后端服务，确保后端服务正常运行。
    """)

# --- Face feature extraction page ---
elif selected_nav == "人脸特征提取":
    st.markdown('<div class="main-header">人脸特征提取</div>', unsafe_allow_html=True)

    with st.expander("功能说明", expanded=True):
        st.markdown("""
        上传包含人脸的图片，提取人脸特征信息，包括：
        - 人脸位置坐标
        - 性别预测
        - 年龄预测
        - 人脸特征向量
        """)

    col1, col2 = st.columns(2)

    # Left column: upload, model parameters, and the extraction trigger.
    with col1:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        uploaded_image = st.file_uploader(
            "上传图片", type=["jpg", "jpeg", "png"], key="face_feature_upload"
        )

        if uploaded_image:
            image = Image.open(uploaded_image)
            st.image(image, caption="上传的图片", use_column_width=True)

            with st.expander("参数设置"):
                detection_model = st.selectbox(
                    "检测模型",
                    ["retinaface", "mtcnn", "dlib"],
                    index=0,
                    help="选择人脸检测使用的模型",
                )
                extract_model = st.selectbox(
                    "特征提取模型",
                    ["arcface", "facenet", "vggface"],
                    index=0,
                    help="选择特征提取使用的模型",
                )

            if st.button("提取人脸特征", key="extract_feature_btn"):
                with st.spinner("正在提取人脸特征..."):
                    try:
                        # Build the request payload (image as base64 JPEG).
                        img_base64 = image_to_base64(image)
                        payload = {
                            "image_base64": img_base64,
                            "detection_model": detection_model,
                            "extract_model": extract_model,
                        }

                        # Call the backend API.
                        response = requests.post(
                            f"{API_BASE_URL}/extract_face_features", json=payload
                        )
                        result = response.json()

                        # Render the response; returns the "data" payload or None.
                        features = display_json_result(result, "✅ 人脸特征提取成功！")

                        # Persist in session state so the right column can
                        # render the result across reruns.
                        if features:
                            st.session_state["face_features"] = features
                    except Exception as e:
                        st.error(f"请求失败: {str(e)}")
        st.markdown("</div>", unsafe_allow_html=True)

    # Right column: render the most recent extraction result.
    with col2:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.markdown('<div class="sub-header">提取结果</div>', unsafe_allow_html=True)

        if "face_features" in st.session_state and st.session_state["face_features"]:
            # One entry per detected face; each entry carries 'bbox',
            # 'gender' (1 = male), 'age', and optionally 'landmarks' and
            # 'feature' (the embedding vector).
            features = st.session_state["face_features"]
            face_count = len(features)

            st.success(f"成功提取到 {face_count} 张人脸")

            for i, face in enumerate(features):
                with st.expander(f"人脸 #{i + 1}", expanded=(i == 0)):
                    cols = st.columns(2)
                    with cols[0]:
                        st.markdown(f"**位置:** {face['bbox']}")
                        st.markdown(
                            f"**性别:** {'男' if face['gender'] == 1 else '女'}"
                        )
                        st.markdown(f"**年龄:** {face['age']}")

                    with cols[1]:
                        if "landmarks" in face:
                            st.markdown("**关键点:**")
                            # Ask the backend to render the landmark overlay.
                            try:
                                response = requests.post(
                                    f"{API_BASE_URL}/visualize_face_landmarks",
                                    json={"landmarks": face["landmarks"]},
                                )
                                vis_result = response.json()
                                if vis_result["code"] == 200:
                                    img_data = base64.b64decode(
                                        vis_result["data"]["image_base64"]
                                    )
                                    img = Image.open(io.BytesIO(img_data))
                                    st.image(img, caption="人脸关键点", width=150)
                            except Exception as e:
                                st.error(f"关键点可视化失败: {str(e)}")

                    # Feature-vector preview and heatmap.
                    if "feature" in face:
                        feature_vector = face["feature"]
                        if len(feature_vector) > 10:
                            # Show only the first 10 components inline.
                            st.markdown(
                                f"**特征向量:** [{', '.join([f'{x:.4f}' for x in feature_vector[:10]])}...]"
                            )

                            # Render the full vector as a 1-row heatmap
                            # (lazy imports keep initial page load light).
                            import matplotlib.pyplot as plt
                            import numpy as np

                            fig, ax = plt.subplots(figsize=(10, 2))
                            feature_array = np.array(feature_vector).reshape(1, -1)
                            im = ax.imshow(feature_array, cmap="viridis", aspect="auto")
                            ax.set_yticks([])
                            ax.set_title("特征向量可视化")
                            plt.colorbar(im, ax=ax, orientation="horizontal", pad=0.2)
                            st.pyplot(fig)
        else:
            st.info("请上传图片并点击提取按钮")
        st.markdown("</div>", unsafe_allow_html=True)

# --- Face matching page (1:1 and 1:N) ---
elif selected_nav == "人脸匹配":
    st.markdown('<div class="main-header">人脸匹配</div>', unsafe_allow_html=True)

    tabs = st.tabs(["两张图片匹配", "一对多匹配"])

    # Tab 1: match two images against each other.
    with tabs[0]:
        st.markdown(
            '<div class="sub-header">两张图片人脸匹配</div>', unsafe_allow_html=True
        )

        with st.expander("功能说明", expanded=True):
            st.markdown(
                "上传两张包含人脸的图片，计算它们的相似度，判断是否为同一个人。"
            )

        col1, col2 = st.columns(2)

        with col1:
            st.markdown('<div class="card">', unsafe_allow_html=True)
            st.markdown("### 图片1")
            image1 = st.file_uploader(
                "上传第一张图片", type=["jpg", "jpeg", "png"], key="match_img1"
            )
            if image1:
                img1 = Image.open(image1)
                st.image(img1, caption="图片1", use_column_width=True)
            st.markdown("</div>", unsafe_allow_html=True)

        with col2:
            st.markdown('<div class="card">', unsafe_allow_html=True)
            st.markdown("### 图片2")
            image2 = st.file_uploader(
                "上传第二张图片", type=["jpg", "jpeg", "png"], key="match_img2"
            )
            if image2:
                img2 = Image.open(image2)
                st.image(img2, caption="图片2", use_column_width=True)
            st.markdown("</div>", unsafe_allow_html=True)

        with st.expander("参数设置"):
            # ValidMetrics is the project-defined set of similarity metrics.
            metric = st.selectbox(
                "相似度度量方法", ValidMetrics, index=0
            )
            threshold = st.slider("相似度阈值", 0.0, 1.0, 0.6)

        # Silently no-op if the button is clicked before both uploads exist.
        if st.button("计算相似度", key="match_faces_btn") and image1 and image2:
            with st.spinner("正在计算相似度..."):
                try:
                    # Build the request payload (both images as base64).
                    img1_base64 = file_to_base64(image1)
                    img2_base64 = file_to_base64(image2)

                    payload = {
                        "image1_base64": img1_base64,
                        "image2_base64": img2_base64,
                        "metric": metric,
                        "threshold": threshold,
                    }

                    # Call the backend API.
                    response = requests.post(
                        f"{API_BASE_URL}/match_two_faces", json=payload
                    )
                    result = response.json()

                    # Render the response.
                    match_result = display_json_result(result, "✅ 相似度计算完成！")

                    if match_result:
                        similarity = match_result.get("similarity", 0)
                        is_same_person = match_result.get("is_same_person", False)

                        # Result card.
                        st.markdown('<div class="card">', unsafe_allow_html=True)
                        st.markdown("### 匹配结果")

                        # Gauge chart for the similarity score (lazy import).
                        import plotly.graph_objects as go

                        fig = go.Figure(
                            go.Indicator(
                                mode="gauge+number",
                                value=similarity,
                                domain={"x": [0, 1], "y": [0, 1]},
                                title={"text": "相似度"},
                                gauge={
                                    "axis": {"range": [0, 1]},
                                    "bar": {"color": "darkblue"},
                                    "steps": [
                                        {"range": [0, threshold], "color": "lightgray"},
                                        {
                                            "range": [threshold, 1],
                                            "color": "lightgreen",
                                        },
                                    ],
                                    # Red marker line at the decision threshold.
                                    "threshold": {
                                        "line": {"color": "red", "width": 4},
                                        "thickness": 0.75,
                                        "value": threshold,
                                    },
                                },
                            )
                        )

                        fig.update_layout(height=250)
                        st.plotly_chart(fig, use_container_width=True)

                        if is_same_person:
                            st.success("✅ 判断结果: 同一个人")
                        else:
                            st.error("❌ 判断结果: 不是同一个人")

                        st.markdown("</div>", unsafe_allow_html=True)
                except Exception as e:
                    st.error(f"请求失败: {str(e)}")

    # Tab 2: one source image against many target images.
    with tabs[1]:
        st.markdown(
            '<div class="sub-header">一对多人脸匹配</div>', unsafe_allow_html=True
        )

        with st.expander("功能说明", expanded=True):
            st.markdown(
                "上传一张源图片和多张目标图片，将源图片与所有目标图片进行匹配，找出最相似的人脸。"
            )

        col1, col2 = st.columns([1, 2])

        with col1:
            st.markdown('<div class="card">', unsafe_allow_html=True)
            st.markdown("### 源图片")
            source_image = st.file_uploader(
                "上传源图片", type=["jpg", "jpeg", "png"], key="batch_source"
            )
            if source_image:
                src_img = Image.open(source_image)
                st.image(src_img, caption="源图片", use_column_width=True)
            st.markdown("</div>", unsafe_allow_html=True)

        with col2:
            st.markdown('<div class="card">', unsafe_allow_html=True)
            st.markdown("### 目标图片集")
            target_images = st.file_uploader(
                "上传目标图片",
                type=["jpg", "jpeg", "png"],
                accept_multiple_files=True,
                key="batch_targets",
            )

            if target_images:
                # Preview the uploaded target images in up to 3 columns.
                cols = st.columns(min(3, len(target_images)))
                for i, img_file in enumerate(target_images):
                    with cols[i % 3]:
                        img = Image.open(img_file)
                        st.image(img, caption=f"目标 #{i + 1}", use_column_width=True)
            st.markdown("</div>", unsafe_allow_html=True)

        with st.expander("参数设置"):
            batch_metric = st.selectbox(
                "相似度度量方法",
                ValidMetrics,
                index=0,
                key="batch_metric",
            )
            batch_threshold = st.slider(
                "相似度阈值", 0.0, 1.0, 0.6, key="batch_threshold"
            )
            top_k = st.slider("返回前K个最相似结果", 1, 10, 3)

        if (
            st.button("开始匹配", key="batch_match_btn")
            and source_image
            and target_images
        ):
            with st.spinner("正在进行一对多匹配..."):
                try:
                    # Build the request payload (source + all targets).
                    src_base64 = file_to_base64(source_image)
                    target_base64_list = [file_to_base64(img) for img in target_images]

                    payload = {
                        "source_image_base64": src_base64,
                        "target_images_base64": target_base64_list,
                        "metric": batch_metric,
                        "threshold": batch_threshold,
                        "top_k": top_k,
                    }

                    # Call the backend API.
                    response = requests.post(
                        f"{API_BASE_URL}/match_face_to_targets", json=payload
                    )
                    result = response.json()

                    # Render the response.
                    match_results = display_json_result(result, "✅ 一对多匹配完成！")

                    if match_results and "matches" in match_results:
                        # Each match carries 'target_index', 'similarity',
                        # and 'is_match' (per usage below).
                        matches = match_results["matches"]

                        # Result card.
                        st.markdown('<div class="card">', unsafe_allow_html=True)
                        st.markdown("### 匹配结果")

                        if not matches:
                            st.warning("没有找到符合阈值的匹配结果")
                        else:
                            # Tabulate the ranked matches.
                            match_data = []
                            for i, match in enumerate(matches):
                                match_data.append(
                                    {
                                        "排名": i + 1,
                                        "目标图片": f"目标 #{match['target_index'] + 1}",
                                        "相似度": f"{match['similarity']:.4f}",
                                        "是否匹配": "✅" if match["is_match"] else "❌",
                                    }
                                )

                            match_df = pd.DataFrame(match_data)
                            st.dataframe(match_df, use_container_width=True)

                            # Visualize the top-3 matches side by side.
                            st.markdown("### 可视化匹配结果")
                            vis_cols = st.columns(min(3, len(matches)))

                            for i, (col, match) in enumerate(
                                zip(vis_cols, matches[:3])
                            ):
                                with col:
                                    target_idx = match["target_index"]
                                    if target_idx < len(target_images):
                                        img = Image.open(target_images[target_idx])
                                        st.image(
                                            img,
                                            caption=f"排名 #{i + 1}",
                                            use_column_width=True,
                                        )
                                        st.markdown(
                                            f"相似度: **{match['similarity']:.4f}**"
                                        )
                                        if match["is_match"]:
                                            st.success("匹配成功")
                                        else:
                                            st.error("未匹配")

                        st.markdown("</div>", unsafe_allow_html=True)
                except Exception as e:
                    st.error(f"请求失败: {str(e)}")

# --- Speech recognition page ---
elif selected_nav == "语音识别":
    st.markdown('<div class="main-header">语音识别</div>', unsafe_allow_html=True)

    with st.expander("功能说明", expanded=True):
        st.markdown("""
        上传音频文件，将语音转换为文本。支持以下功能：
        - 多种音频格式（WAV, MP3, M4A等）
        - 长音频自动分段处理
        - 多语言支持
        - 使用Google或Whisper模型进行识别
        """)

    col1, col2 = st.columns([1, 1])

    # Left column: upload, engine/language parameters, recognition trigger.
    with col1:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.markdown("### 上传音频")
        audio_file = st.file_uploader(
            "选择音频文件", type=["wav", "mp3", "m4a", "ogg"], key="speech_audio"
        )

        if audio_file:
            # NOTE(review): the format is hard-coded to audio/wav even for
            # mp3/m4a/ogg uploads — the player may still cope, but confirm.
            st.audio(audio_file, format="audio/wav")

            with st.expander("参数设置"):
                engine = st.selectbox(
                    "识别引擎", ["whisper", "google"], index=0, help="选择语音识别引擎"
                )

                language = st.selectbox(
                    "语言",
                    ["zh-CN", "en-US", "ja-JP", "ko-KR", "auto"],
                    index=0,
                    help="选择音频语言，auto表示自动检测",
                )

                chunk_size = st.slider(
                    "分段大小(秒)", 10, 120, 30, help="长音频分段处理的每段长度"
                )

            if st.button("开始识别", key="recognize_speech_btn"):
                with st.spinner("正在进行语音识别..."):
                    try:
                        # Build the request payload (audio as base64).
                        audio_base64 = audio_to_base64(audio_file)

                        payload = {
                            "audio_base64": audio_base64,
                            "engine": engine,
                            "language": language,
                            # The API expects milliseconds; the slider is in seconds.
                            "chunk_size": chunk_size * 1000,
                        }

                        # Call the backend API.
                        response = requests.post(
                            f"{API_BASE_URL}/extract_speech_text", json=payload
                        )
                        result = response.json()

                        # Render the response.
                        speech_result = display_json_result(result, "✅ 语音识别完成！")

                        # Persist so the result column survives reruns.
                        if speech_result:
                            st.session_state["speech_result"] = speech_result
                    except Exception as e:
                        st.error(f"请求失败: {str(e)}")
        st.markdown("</div>", unsafe_allow_html=True)

    # Right column: render the most recent recognition result.
    with col2:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.markdown("### 识别结果")

        if "speech_result" in st.session_state and st.session_state["speech_result"]:
            result = st.session_state["speech_result"]

            # Full transcript.
            st.markdown("#### 完整文本")
            st.text_area("识别文本", result["text"], height=200)

            # Per-segment breakdown with start/end timestamps.
            if "segments" in result and result["segments"]:
                st.markdown("#### 分段结果")

                segments = result["segments"]
                for i, segment in enumerate(segments):
                    with st.expander(
                        f"段落 {i + 1} ({segment['start_time']}s - {segment['end_time']}s)"
                    ):
                        st.text(segment["text"])

            # Offer the transcript as a downloadable text file.
            if st.button("导出为TXT文件"):
                text_content = result["text"]
                b64 = base64.b64encode(text_content.encode()).decode()
                now = datetime.now().strftime("%Y%m%d_%H%M%S")
                filename = f"speech_text_{now}.txt"
                # BUG FIX: the computed filename was previously unused — the
                # link hard-coded a "(unknown)" placeholder for both the
                # download attribute and the link text. Use the real name.
                href = f'<a href="data:file/txt;base64,{b64}" download="{filename}">点击下载 {filename}</a>'
                st.markdown(href, unsafe_allow_html=True)
        else:
            st.info("请上传音频文件并点击识别按钮")
        st.markdown("</div>", unsafe_allow_html=True)

# --- Video processing page (frame extraction + speech-to-text) ---
elif selected_nav == "视频处理":
    st.markdown('<div class="main-header">视频处理</div>', unsafe_allow_html=True)

    tabs = st.tabs(["视频帧提取", "视频文字识别"])

    # Tab 1: extract frames from an uploaded video.
    with tabs[0]:
        st.markdown('<div class="sub-header">视频帧提取</div>', unsafe_allow_html=True)

        with st.expander("功能说明", expanded=True):
            st.markdown("""
            上传视频文件，提取视频帧。支持以下功能：
            - 按时间间隔提取帧
            - 设置最大帧数
            - 可选保存到MinIO存储
            """)

        st.markdown('<div class="card">', unsafe_allow_html=True)
        video_file = st.file_uploader(
            "上传视频文件",
            type=["mp4", "avi", "mov", "mkv"],
            key="extract_frames_video",
        )

        if video_file:
            # Preview the video (reads the whole upload into memory).
            video_bytes = video_file.read()
            st.video(video_bytes)

            with st.expander("参数设置"):
                frame_interval = st.slider(
                    "帧间隔(秒)", 0.1, 10.0, 1.0, step=0.1, help="每隔多少秒提取一帧"
                )

                max_frames = st.slider("最大帧数", 10, 1000, 100, help="最多提取的帧数")

                save_to_minio = st.checkbox(
                    "保存到MinIO", value=False, help="是否将提取的帧保存到MinIO存储"
                )

            if st.button("开始提取", key="extract_frames_btn"):
                with st.spinner("正在提取视频帧..."):
                    try:
                        # Build the request payload (video as base64).
                        video_base64 = base64.b64encode(video_bytes).decode("utf-8")

                        if save_to_minio:
                            # MinIO-backed endpoint stores frames remotely.
                            endpoint = f"{API_BASE_URL}/extract_video_frames_to_minio"
                        else:
                            # Plain endpoint returns frames inline as base64.
                            endpoint = f"{API_BASE_URL}/extract_video_frames"

                        payload = {
                            "video_base64": video_base64,
                            "frame_interval": frame_interval,
                            "max_frames": max_frames,
                        }

                        # Call the backend API.
                        response = requests.post(endpoint, json=payload)
                        result = response.json()

                        # Render the response.
                        frames_result = display_json_result(
                            result, "✅ 视频帧提取完成！"
                        )

                        if frames_result:
                            st.session_state["frames_result"] = frames_result

                            # Show the extracted frames.
                            if "frames" in frames_result:
                                frames = frames_result["frames"]
                                st.markdown(f"### 共提取 {len(frames)} 帧")

                                # Lay the frames out in a 4-wide grid.
                                cols_per_row = 4
                                for i in range(0, len(frames), cols_per_row):
                                    cols = st.columns(cols_per_row)
                                    for j, col in enumerate(cols):
                                        idx = i + j
                                        if idx < len(frames):
                                            frame = frames[idx]
                                            with col:
                                                # Decode the base64 frame image.
                                                img_data = base64.b64decode(
                                                    frame["image_base64"]
                                                )
                                                img = Image.open(io.BytesIO(img_data))
                                                st.image(
                                                    img,
                                                    caption=f"帧 {frame['frame_number']} ({frame['timestamp']}s)",
                                                    use_column_width=True,
                                                )

                            # Links to frames stored in MinIO (if that path was used).
                            if save_to_minio and "minio_urls" in frames_result:
                                urls = frames_result["minio_urls"]
                                with st.expander("MinIO存储链接"):
                                    for i, url in enumerate(urls):
                                        st.markdown(f"{i + 1}. [{url}]({url})")
                    except Exception as e:
                        st.error(f"请求失败: {str(e)}")
        st.markdown("</div>", unsafe_allow_html=True)

    # Tab 2: extract spoken text from an uploaded video.
    with tabs[1]:
        st.markdown(
            '<div class="sub-header">视频文字识别</div>', unsafe_allow_html=True
        )

        with st.expander("功能说明", expanded=True):
            st.markdown("""
            上传视频文件，提取视频中的语音并转换为文本。结合了视频处理和语音识别功能。
            """)

        st.markdown('<div class="card">', unsafe_allow_html=True)
        video_text_file = st.file_uploader(
            "上传视频文件", type=["mp4", "avi", "mov", "mkv"], key="extract_video_text"
        )

        if video_text_file:
            # Preview the video.
            video_text_bytes = video_text_file.read()
            st.video(video_text_bytes)

            with st.expander("参数设置"):
                text_engine = st.selectbox(
                    "识别引擎", ["whisper", "google"], index=0, key="video_text_engine"
                )

                text_language = st.selectbox(
                    "语言",
                    ["zh-CN", "en-US", "ja-JP", "ko-KR", "auto"],
                    index=0,
                    key="video_text_language",
                )

            if st.button("开始识别", key="extract_video_text_btn"):
                with st.spinner("正在从视频中提取文字..."):
                    try:
                        # Build the request payload (video as base64).
                        video_text_base64 = base64.b64encode(video_text_bytes).decode(
                            "utf-8"
                        )

                        payload = {
                            "video_base64": video_text_base64,
                            "engine": text_engine,
                            "language": text_language,
                        }

                        # Call the backend API.
                        response = requests.post(
                            f"{API_BASE_URL}/extract_video_text", json=payload
                        )
                        result = response.json()

                        # Render the response.
                        video_text_result = display_json_result(
                            result, "✅ 视频文字识别完成！"
                        )

                        if video_text_result:
                            # Show the recognized text.
                            st.markdown("### 识别结果")
                            st.text_area(
                                "文本内容", video_text_result["text"], height=300
                            )

                            # Show the per-segment timeline, when present.
                            if (
                                "segments" in video_text_result
                                and video_text_result["segments"]
                            ):
                                st.markdown("### 时间轴")
                                segments = video_text_result["segments"]

                                # Tabulate the timeline segments.
                                timeline_data = []
                                for segment in segments:
                                    timeline_data.append(
                                        {
                                            "开始时间": f"{segment['start_time']:.2f}s",
                                            "结束时间": f"{segment['end_time']:.2f}s",
                                            "文本内容": segment["text"],
                                        }
                                    )

                                timeline_df = pd.DataFrame(timeline_data)
                                st.dataframe(timeline_df, use_container_width=True)
                    except Exception as e:
                        st.error(f"请求失败: {str(e)}")
        st.markdown("</div>", unsafe_allow_html=True)

# 系统状态
elif selected_nav == "系统状态":
    st.markdown('<div class="main-header">系统状态</div>', unsafe_allow_html=True)

    # 刷新健康状态
    if st.button("刷新状态"):
        st.cache_data.clear()
        health_status = get_health_status()

    col1, col2 = st.columns(2)

    with col1:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.markdown("### 服务状态")

        if health_status["code"] == 200:
            status_data = health_status["data"]

            # 状态指示器
            if status_data["status"] == "UP":
                st.success("✅ 服务运行正常")
            else:
                st.error("❌ 服务异常")

            # 服务信息
            st.markdown(f"**版本:** {status_data.get('version', '未知')}")
            st.markdown(f"**启动时间:** {status_data.get('start_time', '未知')}")
            st.markdown(f"**运行时长:** {status_data.get('uptime', '未知')}")

            # 系统信息
            if "system_info" in status_data:
                sys_info = status_data["system_info"]
                st.markdown("### 系统信息")
                st.markdown(f"**操作系统:** {sys_info.get('os', '未知')}")
                st.markdown(f"**Python版本:** {sys_info.get('python_version', '未知')}")
                st.markdown(f"**CPU核心数:** {sys_info.get('cpu_count', '未知')}")
                st.markdown(f"**内存使用:** {sys_info.get('memory_usage', '未知')}")
        else:
            st.error(f"获取服务状态失败: {health_status['message']}")
        st.markdown("</div>", unsafe_allow_html=True)

    with col2:
        st.markdown('<div class="card">', unsafe_allow_html=True)
        st.markdown("### API状态")

        # Probe every endpoint: plain GET for read-only routes, OPTIONS for
        # POST routes so no real payload is sent.
        endpoints = [
            ("健康检查", "/health", "GET"),
            ("人脸特征提取", "/extract_face_features", "POST"),
            ("人脸匹配", "/match_two_faces", "POST"),
            ("语音识别", "/extract_speech_text", "POST"),
            ("视频帧提取", "/extract_video_frames", "POST"),
        ]

        endpoint_status = []
        for ep_name, ep_url, ep_method in endpoints:
            # Pessimistic default row; overwritten when the probe succeeds.
            row = {"端点": ep_name, "URL": ep_url, "状态": "不可用", "响应码": "N/A"}
            try:
                probe = requests.get if ep_method == "GET" else requests.options
                response = probe(f"{API_BASE_URL}{ep_url}", timeout=2)
                # 5xx counts as unavailable; anything else means the route exists
                row["状态"] = "可用" if response.status_code < 500 else "不可用"
                row["响应码"] = response.status_code
            except Exception:
                pass
            endpoint_status.append(row)

        # Endpoint availability table
        st.dataframe(pd.DataFrame(endpoint_status), use_container_width=True)

        # Dependency service status, taken from the health payload
        st.markdown("### 依赖服务")

        if health_status["code"] == 200 and "dependencies" in health_status["data"]:
            for dep_name, dep_status in health_status["data"]["dependencies"].items():
                if dep_status == "UP":
                    st.success(f"✅ {dep_name}: 正常")
                else:
                    st.error(f"❌ {dep_name}: {dep_status}")
        else:
            st.info("未获取到依赖服务信息")
        st.markdown("</div>", unsafe_allow_html=True)

    # API request statistics card
    st.markdown('<div class="card">', unsafe_allow_html=True)
    st.markdown("### API请求统计")

    if not (health_status["code"] == 200 and "api_stats" in health_status["data"]):
        st.info("未获取到API请求统计信息")
    else:
        api_stats = health_status["data"]["api_stats"]

        # Flatten the per-endpoint stats into table rows
        api_data = [
            {
                "端点": endpoint,
                "总请求数": stats.get("total_requests", 0),
                "成功请求": stats.get("successful_requests", 0),
                "失败请求": stats.get("failed_requests", 0),
                "平均响应时间(ms)": stats.get("avg_response_time", 0),
            }
            for endpoint, stats in api_stats.items()
        ]

        if not api_data:
            st.info("暂无API请求统计数据")
        else:
            api_df = pd.DataFrame(api_data)
            st.dataframe(api_df, use_container_width=True)

            # Lazy import: plotly is only needed once there is data to chart
            import plotly.express as px

            # Stacked bar: success vs failure counts per endpoint
            st.plotly_chart(
                px.bar(
                    api_df,
                    x="端点",
                    y=["成功请求", "失败请求"],
                    title="API请求统计",
                    barmode="stack",
                ),
                use_container_width=True,
            )

            # Average latency per endpoint, colored by magnitude
            st.plotly_chart(
                px.bar(
                    api_df,
                    x="端点",
                    y="平均响应时间(ms)",
                    title="API平均响应时间",
                    color="平均响应时间(ms)",
                    color_continuous_scale="Viridis",
                ),
                use_container_width=True,
            )
    st.markdown("</div>", unsafe_allow_html=True)

    # System resource monitoring card: CPU / memory / disk gauges plus an
    # optional historical trend line.
    st.markdown('<div class="card">', unsafe_allow_html=True)
    st.markdown("### 系统资源监控")

    if health_status["code"] == 200 and "resource_usage" in health_status["data"]:
        resources = health_status["data"]["resource_usage"]

        # BUGFIX: import plotly here so both names are always bound when this
        # branch runs. Previously `px` was only imported inside the API-stats
        # branch, so rendering the history chart without api_stats raised
        # NameError; `go` was likewise only imported inside the first column.
        import plotly.express as px
        import plotly.graph_objects as go

        def _usage_gauge(value, title):
            """Build a 0-100% gauge with green/orange/red bands and a 90% alarm line.

            value: percentage to display (0-100); title: gauge label text.
            Returns a plotly Figure sized to 250px height.
            """
            fig = go.Figure(
                go.Indicator(
                    mode="gauge+number",
                    value=value,
                    domain={"x": [0, 1], "y": [0, 1]},
                    title={"text": title},
                    gauge={
                        "axis": {"range": [0, 100]},
                        "bar": {"color": "darkblue"},
                        "steps": [
                            {"range": [0, 50], "color": "lightgreen"},
                            {"range": [50, 80], "color": "orange"},
                            {"range": [80, 100], "color": "red"},
                        ],
                        "threshold": {
                            "line": {"color": "red", "width": 4},
                            "thickness": 0.75,
                            "value": 90,
                        },
                    },
                )
            )
            fig.update_layout(height=250)
            return fig

        # Three side-by-side gauges; missing metrics default to 0
        cols = st.columns(3)
        gauges = (
            ("#### CPU使用率", "CPU (%)", "cpu_percent"),
            ("#### 内存使用率", "内存 (%)", "memory_percent"),
            ("#### 磁盘使用率", "磁盘 (%)", "disk_percent"),
        )
        for col, (header, title, key) in zip(cols, gauges):
            with col:
                st.markdown(header)
                st.plotly_chart(
                    _usage_gauge(resources.get(key, 0), title),
                    use_container_width=True,
                )

        # Historical usage trend, when the backend supplies samples
        if "history" in resources and resources["history"]:
            history_data = pd.DataFrame(resources["history"])

            st.markdown("#### 资源使用历史趋势")
            fig = px.line(
                history_data,
                x="timestamp",
                y=["cpu_percent", "memory_percent", "disk_percent"],
                labels={
                    "value": "使用率 (%)",
                    "timestamp": "时间",
                    "variable": "资源类型",
                },
                title="系统资源使用趋势",
            )
            # Horizontal legend above the plot area
            fig.update_layout(
                legend=dict(
                    orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1
                )
            )
            st.plotly_chart(fig, use_container_width=True)
    else:
        st.info("未获取到系统资源监控信息")
    st.markdown("</div>", unsafe_allow_html=True)

# If another page queued a navigation target in session state, consume it
# and rerun the script so the new page renders.
if "nav" in st.session_state:
    selected_nav = st.session_state.pop("nav")
    # st.rerun() replaced st.experimental_rerun() in Streamlit 1.27 (the old
    # name was later removed); fall back so both old and new versions work.
    _rerun = getattr(st, "rerun", None) or st.experimental_rerun
    _rerun()

# Page footer
st.markdown("---")
_footer_html = """
    <div style="text-align: center; color: #888;">
        媒体信息处理服务 WebUI | 基于 Streamlit 构建
    </div>
    """
st.markdown(_footer_html, unsafe_allow_html=True)


# Entry point — the Streamlit page above executes at import time, so there
# is nothing left for main() to do.
def main() -> None:
    """No-op entry point; all UI work happens at module import."""
    return None


if __name__ == "__main__":
    main()
