import logging
import os
from typing import List, Dict

import gradio as gr
import httpx
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.callbacks import FileCallbackHandler
from langchain.tools import Tool
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from pymilvus import (
    connections,
    utility,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
)
from sentence_transformers import SentenceTransformer
from tenacity import retry, stop_after_attempt, wait_exponential

# === Configuration ===
os.environ["LANGUAGE"] = "zh_CN"  # force Chinese locale for libraries that honor it
DEEPSEEK_BASE_URL = "https://api.modelarts-maas.com/v1"  # OpenAI-compatible endpoint
MODEL_NAME = "DeepSeek-V3"  # primary chat model
OLLAMA_MODEL = "llama3"  # fallback model used when DeepSeek is unreachable
REQUEST_TIMEOUT = 10  # HTTP request timeout (seconds)
MILVUS_HOST = "192.168.10.141"
MILVUS_PORT = "7530"
KNOWLEDGE_COLLECTION = "knowledge_base"  # Milvus collection name
EMBEDDING_MODEL_PATH = r'C:\local_model'  # local SentenceTransformer model directory

# Logging: app log file is shared with the LangChain callback handler below,
# so agent traces and application logs end up in the same file.
logging.basicConfig(filename='knowledge_agent.log', level=logging.INFO)
file_callback = FileCallbackHandler('knowledge_agent.log')


# === 1. Milvus知识库管理 ===
class KnowledgeBaseManager:
    """Manage the Milvus-backed vector knowledge base.

    Owns the sentence-embedding model, the Milvus connection and the
    document collection: creation/loading, document insertion, vector
    search and teardown.
    """

    def __init__(self):
        # Load the local sentence-embedding model.
        # NOTE(review): the schema below declares dim=384 — confirm the model
        # at EMBEDDING_MODEL_PATH really emits 384-dim vectors.
        self.model = SentenceTransformer(EMBEDDING_MODEL_PATH)

        # Connect before touching any collection.
        self.connect_to_milvus()

        # Create the collection on first run, then load it for search.
        if not self.check_collection_exists():
            self.create_collection()
        self.collection = Collection(KNOWLEDGE_COLLECTION)
        self.collection.load()

    def connect_to_milvus(self):
        """Open the default Milvus connection."""
        connections.connect(
            alias="default",
            host=MILVUS_HOST,
            port=MILVUS_PORT
        )
        logging.info("成功连接到Milvus服务器")

    def check_collection_exists(self):
        """Return True if the knowledge collection already exists."""
        return utility.has_collection(KNOWLEDGE_COLLECTION)

    def create_collection(self):
        """Create the knowledge collection and its vector index."""
        # Schema: auto-id primary key, raw text, provenance, embedding vector.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=384)
        ]

        schema = CollectionSchema(
            fields=fields,
            description="知识库文档集合"
        )

        collection = Collection(
            name=KNOWLEDGE_COLLECTION,
            schema=schema,
            using='default',
            shards_num=2
        )

        # IVF_FLAT / L2 is a reasonable default for small corpora.
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128}
        }

        collection.create_index(
            field_name="embedding",
            index_params=index_params
        )
        logging.info(f"创建新集合: {KNOWLEDGE_COLLECTION}")

    def embed_text(self, text: str) -> List[float]:
        """Encode *text* into an embedding vector (plain list of floats)."""
        return self.model.encode(text).tolist()

    def add_knowledge(self, documents: List[Dict[str, str]]):
        """Embed and insert documents into the Milvus collection.

        Each document is a dict with ``content`` (required) and ``source``
        (optional, defaults to "unknown").  Over-long content is truncated
        to the VARCHAR limit with a warning.

        BUG FIX: the previous version truncated and validated the documents
        but never called ``collection.insert``, so nothing was ever stored
        and every search came back empty.
        """
        processed_docs = []
        max_length = 65535  # must stay in sync with the VARCHAR field definition

        for doc in documents:
            content = doc["content"]
            # Hard length check: Milvus rejects rows over the declared limit.
            if len(content) > max_length:
                original_length = len(content)
                content = content[:max_length]
                logging.warning(
                    f"文档截断: {doc.get('source', '未知')} "
                    f"原长度: {original_length} -> 截断后: {len(content)}"
                )

            processed_docs.append({
                "content": content,
                "source": doc.get("source", "unknown")
            })

        if not processed_docs:
            return  # nothing to insert

        # Column-oriented insert; column order must match the schema
        # (content, source, embedding) — the auto_id primary key is omitted.
        contents = [d["content"] for d in processed_docs]
        sources = [d["source"] for d in processed_docs]
        embeddings = [self.embed_text(c) for c in contents]
        self.collection.insert([contents, sources, embeddings])
        self.collection.flush()  # make the batch durable and searchable
        logging.info(f"已插入 {len(processed_docs)} 条文档到Milvus")

    def search_knowledge(self, query: str, top_k: int = 3) -> List[Dict]:
        """Vector-search the collection and return the top matches.

        Returns a list of dicts with ``content``, ``source`` and
        ``similarity``, sorted by similarity descending.
        """
        query_vector = self.embed_text(query)

        search_params = {
            "metric_type": "L2",
            "params": {"nprobe": 16}
        }

        results = self.collection.search(
            data=[query_vector],
            anns_field="embedding",
            param=search_params,
            limit=top_k,
            output_fields=['content', 'source']
        )

        knowledge = []
        for hits in results:
            for hit in hits:
                knowledge.append({
                    "content": hit.entity.get('content'),
                    "source": hit.entity.get('source'),
                    # NOTE(review): score is an L2 distance, so 1 - score can
                    # go negative for distant hits; kept for compatibility —
                    # consider 1 / (1 + distance) for a bounded similarity.
                    "similarity": 1 - hit.score
                })

        # Sort best match first (monotonic with ascending L2 distance).
        knowledge.sort(key=lambda x: x['similarity'], reverse=True)
        return knowledge

    def close(self):
        """Release the collection from memory and drop the Milvus connection."""
        self.collection.release()
        connections.disconnect("default")
        logging.info("Milvus连接已关闭")



# === 2. Knowledge-base initialization ===
KNOWLEDGE_FOLDER = "knowledge_docs"  # folder scanned for knowledge documents

# NOTE(review): the Milvus VARCHAR field allows 65535, not 20000, and this
# constant is not referenced anywhere in this file — confirm intent or remove.
MAX_CONTENT_LENGTH = 20000


def init_knowledge_base():
    """Build the KnowledgeBaseManager and seed it from KNOWLEDGE_FOLDER.

    Processes every document in the folder into chunks and inserts them
    in batches of 100.  Falls back to a single placeholder document when
    the folder is empty.  Returns the ready-to-use manager.
    """
    from document_processor import DocumentProcessor

    print("正在初始化知识库...")

    kb = KnowledgeBaseManager()

    # Chunk every file in the knowledge folder.
    docs = DocumentProcessor.process_folder(KNOWLEDGE_FOLDER, max_chars=15000)

    if not docs:
        print(f"警告: 知识库文件夹 {KNOWLEDGE_FOLDER} 中没有可用的文档")
        docs = [{
            "content": "系统初始化完成，请添加文档到knowledge_docs文件夹",
            "source": "system"
        }]

    print(f"找到 {len(docs)} 个文档块，正在添加到向量数据库...")

    # Insert in batches to bound memory usage.
    batch_size = 100
    inserted = 0
    while inserted < len(docs):
        chunk = docs[inserted:inserted + batch_size]
        kb.add_knowledge(chunk)
        inserted += len(chunk)
        print(f"已插入 {inserted}/{len(docs)} 条文档")

    print("知识库初始化完成")
    return kb


# Build the module-level knowledge base at import time.
knowledge_base = init_knowledge_base()

# === 3. QA system core ===
# Shared HTTP client reused by every LLM request; closed on shutdown.
client = httpx.Client(timeout=REQUEST_TIMEOUT)


@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10))
def init_llm() -> ChatOpenAI:
    """Construct the DeepSeek chat client and verify it responds.

    Retried up to 3 times with exponential backoff by tenacity;
    re-raises if the service stays unreachable.
    """
    chat = ChatOpenAI(
        base_url=DEEPSEEK_BASE_URL,
        model=MODEL_NAME,
        temperature=0.7,
        max_retries=2,
        http_client=client,
    )
    # Cheap liveness probe — raises on connectivity/auth failure so the
    # retry decorator can kick in.
    chat.invoke("Ping")
    return chat


def generate_answer(query: str, context: str) -> str:
    """Answer *query* strictly from *context* via the LLM.

    Falls back to echoing the first sentence of the context when the LLM
    is unavailable.  FIXES: the previous version silently swallowed every
    exception (now logged with traceback), and the fallback only split on
    the ASCII '.' although the knowledge base is mostly Chinese text —
    it now also splits on the Chinese full stop '。'.
    """
    try:
        llm = init_llm()
        prompt = f"""基于以下上下文回答问题:

        上下文:
        {context}

        问题: {query}

        要求:
        1. 只使用提供的上下文信息回答
        2. 如果上下文不包含答案，回答"我不知道"
        3. 回答要简洁专业"""

        response = llm.invoke(prompt)
        return response.content
    except Exception:
        # Best-effort degraded answer: first sentence of the retrieved context.
        logging.exception("LLM调用失败，使用降级回答")
        snippet = context
        for sep in ("。", "."):
            snippet = snippet.split(sep, 1)[0]
        return f"根据知识库信息: {snippet}..."


# === 4. 问答工具 ===
def knowledge_qa(query: str) -> str:
    """Answer a question from the local knowledge base.

    Retrieves the top-2 matching chunks and asks the LLM to answer
    strictly from them; returns a fixed message when nothing matches.
    """
    hits = knowledge_base.search_knowledge(query, top_k=2)

    if not hits:
        return "没有找到相关知识"

    # Stitch the retrieved chunks into a single context string.
    sections = []
    for item in hits:
        sections.append(
            f"来源: {item['source']}\n内容: {item['content']}\n相关度: {item['similarity']:.2f}"
        )
    context = "\n".join(sections)

    return generate_answer(query, context)


def _safe_calculate(expression: str) -> str:
    """Safely evaluate a basic arithmetic expression and return it as str.

    SECURITY FIX: the Calculator tool previously passed agent-supplied
    text straight to ``eval``, allowing arbitrary code execution.  This
    walks the parsed AST and permits only numeric literals, basic binary
    arithmetic and unary +/-.

    Raises ValueError for anything outside that whitelist.
    """
    import ast
    import operator as op

    allowed = {
        ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
        ast.Div: op.truediv, ast.FloorDiv: op.floordiv,
        ast.Mod: op.mod, ast.Pow: op.pow,
        ast.USub: op.neg, ast.UAdd: op.pos,
    }

    def _eval(node):
        # Recursive whitelist interpreter over the expression AST.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in allowed:
            return allowed[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in allowed:
            return allowed[type(node.op)](_eval(node.operand))
        raise ValueError(f"不支持的表达式: {expression!r}")

    return str(_eval(ast.parse(expression, mode="eval")))


# Tools exposed to the agent: knowledge-base QA plus a sandboxed calculator.
tools = [
    Tool(
        name="Knowledge_QA",
        func=knowledge_qa,
        description="从本地知识库获取答案。输入应为明确的问题。"
    ),
    Tool(
        name="Calculator",
        func=_safe_calculate,
        description="计算数学表达式，如 '(3 + 5) * 2'。仅支持基础运算。"
    )
]


# === 5. 创建Agent ===
def create_agent() -> AgentExecutor:
    """Build the tool-calling agent, preferring DeepSeek over Ollama.

    Falls back to the local Ollama model when the DeepSeek service
    cannot be reached.
    """
    try:
        llm = init_llm()
        print("DeepSeek 服务已连接")
    except Exception as e:
        print(f"DeepSeek 不可用: {str(e)}, 使用备用模型 {OLLAMA_MODEL}")
        llm = Ollama(model=OLLAMA_MODEL)

    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", "你是知识问答助手，优先使用知识库工具回答问题。"),
        ("user", "{input}"),
        MessagesPlaceholder("agent_scratchpad"),
    ])

    openai_agent = create_openai_tools_agent(llm, tools, chat_prompt)
    executor = AgentExecutor(
        agent=openai_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        max_iterations=3,
    )
    return executor


# Module-level agent used by the Gradio chat handler.
agent_executor = create_agent()


# === 6. Gradio界面 ===
def chat_interface(query: str, chat_history: list) -> tuple[str, list]:
    """Gradio chat handler: run the agent on *query* and extend history.

    Always returns ("", updated_history) so the input textbox is cleared.
    Blank input and agent failures are answered inline instead of raising.
    """
    if not query.strip():
        return "", chat_history + [(query, "请输入有效问题")]

    try:
        result = agent_executor.invoke(
            {"input": query},
            config={"callbacks": [file_callback]}
        )
        answer = result["output"]
    except Exception as e:
        answer = f"处理错误: {str(e)}"
    return "", chat_history + [(query, answer)]


# === Gradio UI: chat tab + knowledge-base management tab ===
with gr.Blocks(title="知识问答系统", css=".gradio-container {max-width: 800px !important}") as demo:
    gr.Markdown("""
    ## 📚 智能知识管理系统
    *基于Milvus向量数据库和LLM的知识问答*
    """)

    with gr.Tab("知识问答"):
        chatbot = gr.Chatbot(height=400, label="对话历史")
        msg = gr.Textbox(label="输入问题", placeholder="输入您的问题后按回车...", lines=3)
        clear_btn = gr.Button("清空对话", variant="secondary")

        # Enter in the textbox submits to the agent and updates history.
        msg.submit(
            chat_interface,
            inputs=[msg, chatbot],
            outputs=[msg, chatbot]
        )
        clear_btn.click(lambda: None, None, chatbot)

    with gr.Tab("知识库管理"):
        with gr.Row():
            upload_btn = gr.UploadButton(
                "上传文档",
                file_types=[".txt", ".pdf", ".docx", ".csv"],
                file_count="multiple"
            )
            refresh_btn = gr.Button("刷新知识库", variant="primary")

        with gr.Row():
            doc_viewer = gr.Dataframe(
                headers=["来源", "内容片段"],
                interactive=False,
                wrap=True
            )


        def upload_files(files):
            """Persist uploaded files, chunk them and insert into Milvus.

            NOTE(review): modern gradio UploadButton usually delivers
            path-like objects, not readable file handles — ``file.read()``
            below may fail; verify against the installed gradio version.
            NOTE(review): this returns a dict, but the wired output is a
            Dataframe component expecting rows — confirm the mismatch.
            """
            from document_processor import DocumentProcessor
            docs = []

            for file in files:
                try:
                    # Save the upload to a temp path first.
                    temp_path = os.path.join(KNOWLEDGE_FOLDER, f"temp_{os.path.basename(file.name)}")
                    with open(temp_path, 'wb') as f:
                        f.write(file.read())

                    # Extract, clean and chunk the file content.
                    content = DocumentProcessor.read_file(temp_path)
                    if content:
                        cleaned = DocumentProcessor.clean_text(content)
                        chunks = DocumentProcessor.chunk_text(cleaned, max_chars=15000)

                        # Promote the temp file to its final name.
                        save_path = os.path.join(KNOWLEDGE_FOLDER, os.path.basename(file.name))
                        os.rename(temp_path, save_path)

                        docs.extend([{
                            "content": chunk,
                            "source": f"{os.path.splitext(file.name)[0]}_part{i + 1}"
                        } for i, chunk in enumerate(chunks)])

                except Exception as e:
                    # Best-effort: skip the failing file, keep processing the rest.
                    logging.error(f"处理文件 {file.name} 失败: {str(e)}")
                    continue

            # Insert in batches; each batch failure is logged independently.
            if docs:
                batch_size = 20  # smaller batches than init to limit failure impact
                success_count = 0
                for i in range(0, len(docs), batch_size):
                    try:
                        knowledge_base.add_knowledge(docs[i:i + batch_size])
                        success_count += len(docs[i:i + batch_size])
                    except Exception as e:
                        logging.error(f"插入批次 {i}-{i + batch_size} 失败: {str(e)}")

                return {
                    "total": len(docs),
                    "success": success_count,
                    "samples": [[d["source"], d["content"][:100] + "..."] for d in docs[:10]]
                }
            return {"status": "没有可添加的文档"}


        def refresh_docs():
            """Query up to 50 stored documents and format rows for the viewer."""
            docs = knowledge_base.collection.query(
                expr="id >= 0",
                output_fields=["source", "content"],
                limit=50
            )
            return [[d["source"], d["content"][:100] + "..."] for d in docs]


        # Wire the management tab: upload, manual refresh, refresh on page load.
        upload_btn.upload(upload_files, upload_btn, doc_viewer)
        refresh_btn.click(refresh_docs, outputs=doc_viewer)
        demo.load(refresh_docs, outputs=doc_viewer)


def check_system_health():
    """Return a status dict for Milvus, the document count and the LLMs.

    FIX: the two bare ``except:`` clauses (which swallow even
    KeyboardInterrupt/SystemExit) are narrowed to ``except Exception``
    and failures are logged instead of silently discarded.
    """
    status = {
        "Milvus连接": False,
        "知识库文档数": 0,
        "LLM服务": "未知"
    }

    # Milvus reachability and stored-document count.
    try:
        status["Milvus连接"] = utility.list_collections() is not None
        count = knowledge_base.collection.query(
            expr="id >= 0",
            output_fields=["count(*)"]
        )
        status["知识库文档数"] = count[0]["count(*)"]
    except Exception:
        logging.exception("Milvus健康检查失败")

    # LLM reachability: DeepSeek first, then the Ollama fallback.
    try:
        init_llm()
        status["LLM服务"] = "DeepSeek可用"
    except Exception:
        try:
            Ollama(model=OLLAMA_MODEL).invoke("Ping")
            status["LLM服务"] = "Ollama可用"
        except Exception:
            logging.exception("LLM健康检查失败")
            status["LLM服务"] = "不可用"

    return status


# NOTE(review): despite this placement comment, check_system_health above is
# never wired to any Gradio button — confirm whether that wiring is missing.
def optimize_collection():
    """Compact the Milvus collection and reload it into memory."""
    knowledge_base.collection.compact()
    knowledge_base.collection.load()
    print("集合优化完成")

# Append an "advanced management" accordion to the existing Blocks app.
with demo:
    with gr.Accordion("高级管理", open=False):
        optimize_btn = gr.Button("优化集合")
        optimize_btn.click(
            lambda: optimize_collection(),
            outputs=None
        )


# === 7. Application entry point ===
if __name__ == "__main__":
    try:
        # Ensure the upload target folder exists before serving.
        if not os.path.exists(KNOWLEDGE_FOLDER):
            os.makedirs(KNOWLEDGE_FOLDER)
            print(f"已创建知识库文件夹: {KNOWLEDGE_FOLDER}")

        # Blocks until the server is stopped (Ctrl+C or process kill).
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            show_error=True
        )
    except Exception as e:
        print(f"系统启动失败: {str(e)}")
    finally:
        # Always release Milvus and the shared HTTP client on shutdown.
        knowledge_base.close()
        client.close()
        print("资源已释放")