"""
Gradio 用户界面
提供基于Web的交互式界面，支持本地和OpenAI模型
"""
import os
import gradio as gr
import requests
from typing import List, Dict, Any, Optional
import json
from pathlib import Path
import yaml

# Backend REST API endpoints. The FastAPI service is assumed to listen on
# localhost:7860; this Gradio UI itself is launched on port 7861 (see main()).
API_BASE_URL = "http://localhost:7860/api/v1"
UPLOAD_ENDPOINT = f"{API_BASE_URL}/documents/upload"
SEARCH_ENDPOINT = f"{API_BASE_URL}/search"
ASK_ENDPOINT = f"{API_BASE_URL}/ask"

# 模型配置
def scan_gguf_models():
    """Return the file names of all *.gguf model files under ./models.

    If the directory does not exist yet it is created (and reported),
    and an empty list is returned.
    """
    models_dir = Path("models")
    if models_dir.exists():
        return [entry.name for entry in models_dir.glob("*.gguf")]
    models_dir.mkdir()
    print(f"已创建模型目录: {models_dir.absolute()}")
    return []

# Scan once at import time; update_model_ui() re-scans on demand when the
# user switches to the GGUF backend.
GGUF_MODELS = scan_gguf_models()

# Per-backend model configuration. Each entry holds: a display name, the
# selectable chat models, the selectable embedding models, and whether the
# GGUF-specific sliders (n_ctx / n_gpu_layers) should be shown in the UI.
MODEL_CONFIG = {
    "openai": {
        "name": "OpenAI (GPT-3.5/4)",
        "models": ["gpt-3.5-turbo", "gpt-4"],
        "embedding_models": ["text-embedding-3-small", "text-embedding-3-large"],
        "show_gguf_options": False
    },
    "local": {
        "name": "Hugging Face模型",
        "models": [
            "gpt2", 
            "facebook/opt-350m", 
            "bigscience/bloom-560m",
            "EleutherAI/gpt-neo-125m"
        ],
        "embedding_models": [
            "sentence-transformers/all-MiniLM-L6-v2",
            "BAAI/bge-small-zh-v1.5",
            "moka-ai/m3e-base"
        ],
        "show_gguf_options": False
    },
    "gguf": {
        "name": "GGUF模型 (Llama.cpp)",
        # Placeholder string keeps the dropdown non-empty when no .gguf
        # files are present; downstream code detects it via startswith("未找到").
        "models": GGUF_MODELS or ["未找到GGUF模型，请将.gguf文件放入models文件夹"],
        "embedding_models": [
            "sentence-transformers/all-MiniLM-L6-v2"
        ],
        "show_gguf_options": True
    }
}

def upload_file(file, metadata_str: str = "{}"):
    """
    Upload one or more documents to the backend.

    Args:
        file: A Gradio file object, or a list of them — the UI configures
            gr.File(file_count="multiple"), so a list is the common case.
        metadata_str: JSON-encoded metadata forwarded unchanged to the API.

    Returns:
        A human-readable status string, one paragraph per uploaded file.
    """
    if not file:
        return "请先选择要上传的文件"
    # Normalize: with file_count="multiple" Gradio delivers a list; keep a
    # single bare file object working too.
    uploads = file if isinstance(file, list) else [file]

    messages = []
    for item in uploads:
        try:
            # Context manager closes the handle even on failure — the
            # previous version leaked the open file object. Send only the
            # base name, not the full temp path, as the remote filename.
            with open(item.name, "rb") as fh:
                response = requests.post(
                    UPLOAD_ENDPOINT,
                    files={"file": (os.path.basename(item.name), fh)},
                    data={"metadata": metadata_str},
                )
            response.raise_for_status()
            result = response.json()
            messages.append(
                f"文件上传成功！\n文档ID: {result['id']}\n块数量: {len(result['chunks'])}"
            )
        except Exception as e:
            messages.append(f"上传文件时出错: {str(e)}")
    return "\n\n".join(messages)

def search_documents(query: str, top_k: int = 4):
    """
    Query the backend search endpoint and render the hits as Markdown.

    Args:
        query: Free-text search query.
        top_k: Maximum number of hits to request.

    Returns:
        A Markdown string with the results, or an error/empty message.
    """
    try:
        resp = requests.post(
            SEARCH_ENDPOINT,
            json={"query": query, "top_k": top_k}
        )
        resp.raise_for_status()
        hits = resp.json()

        if not hits:
            return "未找到相关文档。"

        # Accumulate fragments and join once instead of repeated +=.
        parts = ["## 搜索结果\n\n"]
        for idx, hit in enumerate(hits, 1):
            origin = hit.get("metadata", {}).get("source", "未知来源")
            similarity = hit.get("score", 0)
            snippet = hit.get("content", "")
            parts.append(f"### 结果 #{idx} (相似度: {similarity:.2f}) - {origin}\n")
            parts.append(f"{snippet[:300]}...\n\n")
        return "".join(parts)
    except Exception as e:
        return f"搜索时出错: {str(e)}"

def ask_question(question: str, top_k: int = 4, model_type: str = "gguf", model_name: str = None, 
                embedding_model: str = None, n_ctx: int = 4096, n_gpu_layers: int = 0, api_key: str = None):
    """
    Send a question to the backend /ask endpoint and format the answer.

    Args:
        question: The user's question.
        top_k: Number of retrieved chunks to ground the answer on.
        model_type: Backend key ("openai", "local", "gguf") or the display
            label emitted by the UI Radio component (e.g. "OpenAI").
        model_name: Model name (OpenAI/HF) or a GGUF file name under models/.
        embedding_model: Embedding model; defaults to the backend's first one.
        n_ctx: Context window length (GGUF only).
        n_gpu_layers: Layers offloaded to the GPU (GGUF only).
        api_key: OpenAI API key (OpenAI only).

    Returns:
        Markdown with the answer plus a source list, or an error message.
    """
    try:
        # BUGFIX: the Radio component passes its display label, not the
        # internal key, so map labels back before indexing MODEL_CONFIG
        # (previously every UI call raised KeyError here).
        label_to_key = {
            "OpenAI": "openai",
            "Hugging Face模型": "local",
            "GGUF模型 (Llama.cpp)": "gguf",
        }
        model_type = label_to_key.get(model_type, model_type)
        if model_type not in MODEL_CONFIG:
            return f"获取回答时出错: 未知的模型类型: {model_type}"

        # Build the request payload.
        data = {
            "question": question,
            "top_k": top_k,
            "model_type": model_type,
            "embedding_model": embedding_model or MODEL_CONFIG[model_type]["embedding_models"][0]
        }

        # Resolve the model name/path per backend.
        if model_type == "gguf":
            # Only accept a real .gguf file inside the models folder; the
            # dropdown placeholder starts with "未找到".
            if model_name and not model_name.startswith("未找到"):
                model_path = Path("models") / model_name
                if model_path.exists():
                    data["model_name"] = str(model_path.absolute())
                    # GGUF-specific runtime parameters.
                    data.update({
                        "gguf_n_ctx": n_ctx,
                        "gguf_n_gpu_layers": n_gpu_layers
                    })
                else:
                    return f"错误: 找不到模型文件: {model_path}"
            else:
                return "错误: 请先添加GGUF模型到models目录"
        elif model_type == "openai":
            # NOTE(review): this sets the key in the UI process only; if the
            # API server runs as a separate process it will not see it —
            # confirm the backend reads OPENAI_API_KEY from its own env.
            if api_key:
                os.environ["OPENAI_API_KEY"] = api_key
            data["model_name"] = model_name or MODEL_CONFIG["openai"]["models"][0]
        else:  # Hugging Face local models
            data["model_name"] = model_name or MODEL_CONFIG["local"]["models"][0]

        # Call the backend.
        response = requests.post(
            ASK_ENDPOINT,
            json=data
        )
        response.raise_for_status()

        result = response.json()
        answer = result.get("answer", "未获取到回答")
        sources = result.get("sources", [])

        # Render answer followed by a numbered source list.
        output = f"## 回答\n{answer}\n\n"

        if sources:
            output += "## 来源\n"
            for i, source in enumerate(sources, 1):
                source_name = source.get("source", f"来源 {i}")
                output += f"{i}. {source_name}\n"

        return output
    except requests.exceptions.RequestException as e:
        # Prefer the API's structured error detail when the body is JSON.
        error_msg = str(e)
        if hasattr(e, 'response') and e.response is not None:
            try:
                error_msg = e.response.json().get("detail", str(e))
            except Exception:  # narrowed from bare except
                error_msg = e.response.text or str(e)
        return f"请求API时出错: {error_msg}"
    except Exception as e:
        return f"获取回答时出错: {str(e)}"

def update_model_ui(model_type: str):
    """
    Return updated UI components for the selected model backend.

    Args:
        model_type: Backend key ("openai", "local", "gguf") or the display
            label emitted by the model-type Radio component.

    Returns:
        [model dropdown, embedding dropdown, OpenAI key textbox,
         n_ctx slider, n_gpu_layers slider]
    """
    # BUGFIX: the Radio component passes display labels ("OpenAI",
    # "GGUF模型 (Llama.cpp)", ...) while this function compared against the
    # internal keys, so OpenAI/GGUF selections fell into the wrong branch.
    label_to_key = {
        "OpenAI": "openai",
        "Hugging Face模型": "local",
        "GGUF模型 (Llama.cpp)": "gguf",
    }
    model_type = label_to_key.get(model_type, model_type)

    if model_type == "openai":
        return [
            gr.Dropdown(
                choices=MODEL_CONFIG["openai"]["models"],
                value=MODEL_CONFIG["openai"]["models"][0],
                label="OpenAI模型",
                interactive=True
            ),
            gr.Dropdown(
                choices=MODEL_CONFIG["openai"]["embedding_models"],
                value=MODEL_CONFIG["openai"]["embedding_models"][0],
                label="嵌入模型",
                interactive=True
            ),
            gr.Textbox(
                label="OpenAI API Key",
                placeholder="输入OpenAI API密钥",
                type="password"
            ),
            # GGUF sliders stay hidden for non-GGUF backends.
            gr.Slider(visible=False),
            gr.Slider(visible=False)
        ]
    elif model_type == "gguf":
        # Re-scan the models directory so newly added files appear without
        # restarting the app.
        gguf_models = scan_gguf_models()
        if not gguf_models:
            gguf_models = ["未找到GGUF模型，请将.gguf文件放入models文件夹"]
            interactive = False
        else:
            interactive = True

        return [
            gr.Dropdown(
                choices=gguf_models,
                value=gguf_models[0] if gguf_models else "",
                label="选择GGUF模型",
                interactive=interactive
            ),
            gr.Dropdown(
                choices=MODEL_CONFIG["gguf"]["embedding_models"],
                value=MODEL_CONFIG["gguf"]["embedding_models"][0],
                label="选择嵌入模型",
                interactive=interactive
            ),
            gr.Textbox(visible=False),  # placeholder for the OpenAI key slot
            gr.Slider(
                minimum=512,
                maximum=8192,
                value=4096,
                step=256,
                label="上下文长度 (n_ctx)",
                interactive=interactive
            ) if interactive else gr.Slider(visible=False),
            gr.Slider(
                minimum=0,
                maximum=100,
                value=0,
                step=1,
                label="GPU加速层数 (0=仅CPU)",
                interactive=interactive
            ) if interactive else gr.Slider(visible=False)
        ]
    else:  # Hugging Face local models
        return [
            gr.Dropdown(
                choices=MODEL_CONFIG["local"]["models"],
                value=MODEL_CONFIG["local"]["models"][0],
                label="本地模型",
                interactive=True
            ),
            gr.Dropdown(
                choices=MODEL_CONFIG["local"]["embedding_models"],
                value=MODEL_CONFIG["local"]["embedding_models"][0],
                label="嵌入模型",
                interactive=True
            ),
            gr.Textbox(visible=False),  # placeholder for the OpenAI key slot
            gr.Slider(visible=False),
            gr.Slider(visible=False)
        ]

def create_ui():
    """
    Build the Gradio Blocks application (knowledge-base tab + Q&A tab).

    Returns:
        The assembled gr.Blocks app, ready for .launch().
    """
    with gr.Blocks(title="个人知识库系统") as demo:
        gr.Markdown("# 个人知识库系统 (RAG)")

        with gr.Tabs():
            with gr.TabItem("知识库管理"):
                with gr.Row():
                    with gr.Column(scale=1):
                        file_upload = gr.File(
                            label="上传文档",
                            file_types=[".txt", ".pdf", ".docx", ".pptx"],
                            file_count="multiple"
                        )
                        upload_btn = gr.Button("上传并处理")
                        # BUGFIX: upload_file returns a status string, which
                        # cannot be rendered into a Dataframe — show it in a
                        # dedicated read-only textbox instead.
                        upload_status = gr.Textbox(label="上传状态", interactive=False)

                        gr.Markdown("### 文档列表")
                        document_list = gr.Dataframe(
                            headers=["文件名", "大小", "上传时间"],
                            interactive=False,
                            label="已上传文档"
                        )
                        refresh_btn = gr.Button("刷新列表")

                    with gr.Column(scale=2):
                        search_query = gr.Textbox(label="搜索文档", placeholder="输入关键词搜索...")
                        search_btn = gr.Button("搜索")
                        # BUGFIX: search_documents returns Markdown text, so
                        # render it with a Markdown component, not a Dataframe.
                        search_results = gr.Markdown("搜索结果将显示在这里...")

            with gr.TabItem("智能问答"):
                with gr.Row():
                    with gr.Column(scale=1):
                        # Backend selector; the callbacks map these display
                        # labels to internal keys themselves.
                        model_type = gr.Radio(
                            label="模型类型",
                            choices=["OpenAI", "Hugging Face模型", "GGUF模型 (Llama.cpp)"],
                            value="GGUF模型 (Llama.cpp)",
                            interactive=True
                        )

                        # Populated by update_model_ui on load/change.
                        model_selector = gr.Dropdown(
                            label="选择模型",
                            interactive=True
                        )

                        embedding_model = gr.Dropdown(
                            label="嵌入模型",
                            interactive=True
                        )

                        openai_key = gr.Textbox(
                            label="OpenAI API Key",
                            placeholder="输入OpenAI API密钥",
                            type="password",
                            visible=False
                        )

                        # GGUF-only runtime parameters; hidden for other backends.
                        n_ctx = gr.Slider(
                            minimum=512,
                            maximum=8192,
                            value=4096,
                            step=256,
                            label="上下文长度 (n_ctx)",
                            interactive=True,
                            visible=False
                        )

                        n_gpu_layers = gr.Slider(
                            minimum=0,
                            maximum=100,
                            value=0,
                            step=1,
                            label="GPU加速层数 (0=仅CPU)",
                            interactive=True,
                            visible=False
                        )

                        rag_top_k = gr.Slider(
                            minimum=1,
                            maximum=10,
                            value=4,
                            step=1,
                            label="返回结果数量"
                        )

                    with gr.Column(scale=2):
                        question_input = gr.Textbox(
                            label="输入您的问题",
                            placeholder="输入您的问题，例如：什么是机器学习？",
                            lines=5
                        )
                        ask_btn = gr.Button("提问")
                        answer_output = gr.Markdown(
                            "## 回答\n在这里查看AI的回答...",
                            show_label=False
                        )

        # --- Event wiring ---
        upload_btn.click(
            fn=upload_file,
            inputs=[file_upload],
            outputs=[upload_status]
        )

        refresh_btn.click(
            fn=list_documents,
            inputs=[],
            outputs=[document_list]
        )

        search_btn.click(
            fn=search_documents,
            inputs=[search_query],
            outputs=[search_results]
        )

        # Swap model-specific widgets when the backend changes.
        model_type.change(
            fn=update_model_ui,
            inputs=[model_type],
            outputs=[model_selector, embedding_model, openai_key, n_ctx, n_gpu_layers]
        )

        # BUGFIX: Gradio expects a positional LIST of input components here;
        # the previous {str: component} dict is not a supported inputs format.
        # Order must match ask_question's parameter order.
        ask_btn.click(
            fn=ask_question,
            inputs=[question_input, rag_top_k, model_type, model_selector,
                    embedding_model, n_ctx, n_gpu_layers, openai_key],
            outputs=answer_output
        )

        # Populate the document table on page load.
        demo.load(
            fn=list_documents,
            inputs=[],
            outputs=[document_list],
            queue=False
        )

        # Initialize model widgets for the default backend; pass the internal
        # key, which update_model_ui accepts directly.
        demo.load(
            fn=lambda: update_model_ui("gguf"),
            inputs=[],
            outputs=[model_selector, embedding_model, openai_key, n_ctx, n_gpu_layers],
            queue=False
        )

    return demo

def list_documents():
    """
    Fetch the uploaded documents from the backend.

    Returns:
        Rows of [name, size-in-KB, upload time] for the document table;
        an empty list when the request fails (error is printed).
    """
    try:
        resp = requests.get(f"{API_BASE_URL}/documents")
        resp.raise_for_status()
        # One table row per document, with size rendered in KB.
        return [
            [
                doc.get("name", "未知文件"),
                f"{doc.get('size', 0) / 1024:.2f} KB",
                doc.get("upload_time", "未知时间"),
            ]
            for doc in resp.json()
        ]
    except Exception as e:
        print(f"获取文档列表失败: {str(e)}")
        return []

# Application entry point
def main():
    """Ensure the models directory exists, then build and launch the UI."""
    models_dir = Path("models")
    if not models_dir.exists():
        models_dir.mkdir()
        print("已创建models目录，请将GGUF模型文件放入此目录")

    # Serve on all interfaces, one port above the backend API.
    demo = create_ui()
    demo.launch(server_name="0.0.0.0", server_port=7861, share=False)

if __name__ == "__main__":
    main()
