# -*- coding:utf-8 -*-

import os
import re
import json
import requests
import logging
from typing import List, Dict, Tuple, Optional
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.document_loaders import (
    TextLoader,
    PyPDFLoader,
    Docx2txtLoader,
    UnstructuredMarkdownLoader,
    WebBaseLoader
)
from langchain.schema import Document

# from script.utils.logger_config import logger
from config.config import settings


def get_siliconflow_models(limit=20):
    """Fetch the list of models available on the SiliconFlow platform.

    Credentials and the endpoint URL come from the project-wide ``settings``
    object (``settings.openaiConfig``), not from parameters.

    Args:
        limit: maximum number of models to return (default 20).

    Returns:
        On success, a list of dicts with keys ``id``, ``name``,
        ``description``, ``created_at``, ``capabilities``, ``owner`` and
        ``context_length``. On any failure, a dict of the form
        ``{"error": "..."}`` — callers must check for the ``error`` key.
    """
    url = settings.openaiConfig.models_url
    headers = {
        "Authorization": f"Bearer {settings.openaiConfig.api_key}",
        "Content-Type": "application/json"
    }

    try:
        # Explicit timeout: without one, requests can block indefinitely
        # on an unresponsive endpoint.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        data = response.json()

        if "data" not in data:
            return {"error": f"API返回格式异常: {data}"}

        models = []
        for model in data["data"][:limit]:
            models.append({
                "id": model["id"],
                # Fall back to the id when optional fields are absent.
                "name": model.get("name", model["id"]),
                "description": model.get("description", ""),
                "created_at": model.get("created", ""),
                "capabilities": model.get("capabilities", []),
                "owner": model.get("owned_by", "siliconflow"),
                "context_length": model.get("context_length", 4096)
            })

        return models
    except Exception as e:
        # Broad catch is deliberate: this helper reports failures as data
        # instead of raising (network, timeout, JSON and key errors all
        # end up here).
        return {"error": f"获取模型列表失败: {str(e)}"}


class FinancialQAAssistant:
    """Retrieval-augmented financial Q&A assistant.

    Loads documents (txt/pdf/docx/md or web pages), splits them into chunks,
    indexes them in a FAISS vector store built from OpenAI-compatible
    embeddings, and answers questions through a langchain
    ConversationalRetrievalChain. Also supports direct (document-free) chat
    and JSON save/load of the conversation transcript.
    """

    def __init__(self):
        # Order matters: config -> logging -> components, because each later
        # step reads what the previous one set up.
        self.config = self._load_config()
        self._init_logging()
        self._init_components()
        # Plain list of {"role": ..., "content": ...} dicts, kept alongside
        # the langchain memory so it can be JSON-serialized by
        # save_conversation.
        self.conversation_history = []
        self._ensure_directories()
        self.logger.info("金融问答助手初始化完成")

    def _load_config(self) -> dict:
        """Assemble the nested configuration dict used by all components.

        Everything is a hard-coded default except the 'api' section, which
        is taken from the project-wide `settings` object. `setdefault` is
        used throughout so a pre-populated dict could override entries.
        """
        config = {}
        # System-level defaults: persona prompt, history cap, sampling temp.
        config.setdefault('system', {})
        config['system'].setdefault('prompt', "你是一个金融领域的专业助手")
        config['system'].setdefault('max_history', 20)
        config['system'].setdefault('temperature', 0.7)

        # Text-splitting parameters and the accepted upload extensions.
        config.setdefault('text_processing', {})
        config['text_processing'].setdefault('chunk_size', 1000)
        config['text_processing'].setdefault('chunk_overlap', 200)
        config['text_processing'].setdefault('allowed_extensions', [".txt", ".pdf", ".docx", ".md"])

        # Working directories (created on demand by _ensure_directories).
        config.setdefault('paths', {})
        config['paths'].setdefault('documents', "./documents")
        config['paths'].setdefault('conversations', "./conversations")
        config['paths'].setdefault('vector_stores', "./vector_stores")

        config.setdefault('logging', {})
        config['logging'].setdefault('level', "INFO")
        config['logging'].setdefault('file', "assistant.log")

        # API credentials/endpoints come from the project settings module.
        config.setdefault('api', {})
        config['api'].setdefault('base_url', settings.openaiConfig.base_url)
        config['api'].setdefault('api_key', settings.openaiConfig.api_key)
        config['api'].setdefault('model', settings.openaiConfig.model)
        config['api'].setdefault('embedding_model', settings.openaiConfig.embedding_model)
        config['api'].setdefault('embedding_chunk_size', settings.openaiConfig.embedding_chunk_size)

        return config

    def _init_logging(self):
        """Initialize logging to both a file and the console.

        NOTE(review): logging.basicConfig configures the *root* logger — a
        process-wide side effect that also changes logging for every other
        module in the process; confirm this is intended.
        """
        # Unknown level names silently fall back to INFO.
        log_level = getattr(logging, self.config['logging']['level'].upper(), logging.INFO)
        log_file = self.config['logging']['file']

        logging.basicConfig(
            level=log_level,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger("FinancialQAAssistant")
        self.logger.info("日志系统初始化完成")

    def _init_components(self):
        """Initialize the LangChain components (splitter, memory, LLM, embeddings)."""
        # Text splitter used by process_documents.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.config['text_processing']['chunk_size'],
            chunk_overlap=self.config['text_processing']['chunk_overlap']
        )

        # Chat memory for the retrieval chain; output_key='answer' is needed
        # because the chain also returns 'source_documents' and the memory
        # must know which output to record.
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
            output_key='answer'
        )

        # Vector store is created/loaded lazily via
        # create_vector_store / load_vector_store.
        self.vector_store = None

        # File extension -> langchain document loader class.
        self.file_loaders = {
            ".txt": TextLoader,
            ".pdf": PyPDFLoader,
            ".docx": Docx2txtLoader,
            ".md": UnstructuredMarkdownLoader
        }

        # Chat model pointed at the OpenAI-compatible endpoint from config.
        self.llm = ChatOpenAI(
            openai_api_base=self.config['api']['base_url'],
            openai_api_key=self.config['api']['api_key'],
            model_name=self.config['api']['model'],
            temperature=self.config['system']['temperature']
        )

        # Embedding model used to build/query the FAISS index.
        self.embeddings = OpenAIEmbeddings(
            openai_api_base=self.config['api']['base_url'],
            openai_api_key=self.config['api']['api_key'],
            model=self.config['api']['embedding_model'],
            chunk_size = self.config['api']['embedding_chunk_size']
        )

        # QA chain is built later by init_qa_chain (needs a vector store).
        self.qa_chain = None

    def _ensure_directories(self):
        """Create the working directories if they do not already exist."""
        os.makedirs(self.config['paths']['documents'], exist_ok=True)
        os.makedirs(self.config['paths']['conversations'], exist_ok=True)
        os.makedirs(self.config['paths']['vector_stores'], exist_ok=True)
        self.logger.info(f"文档目录: {self.config['paths']['documents']}")
        self.logger.info(f"对话目录: {self.config['paths']['conversations']}")
        self.logger.info(f"向量存储目录: {self.config['paths']['vector_stores']}")

    def _get_loader_for_file(self, file_path: str):
        """Return the loader *class* for a file based on its extension.

        Raises:
            ValueError: if the extension has no registered loader.
        """
        ext = os.path.splitext(file_path)[1].lower()
        if ext in self.file_loaders:
            return self.file_loaders[ext]
        raise ValueError(f"不支持的文件类型: {ext}")

    def _create_custom_prompt(self) -> PromptTemplate:
        """Build the default (general financial assistant) prompt template."""
        template = """
        你是一个专业的金融分析师助手，专门处理上市公司年报、财务报告和金融数据分析。
        请根据以下上下文和对话历史，专业、准确地回答用户的问题。
        如果问题与金融无关，请礼貌地说明你只回答金融相关问题。
        如果上下文不足以回答问题，请说明你需要更多信息，不要编造答案。

        上下文:
        {context}

        对话历史:
        {chat_history}

        问题: {question}

        专业回答:
        """
        return PromptTemplate(
            input_variables=["context", "chat_history", "question"],
            template=template
        )

    def _create_financial_analysis_prompt(self) -> PromptTemplate:
        """Build the stricter prompt template for financial-report analysis."""
        template = """
        [角色] 你是一位资深财务分析师，正在分析上市公司年报。
        [任务] 根据提供的财务报告片段，回答用户的问题。
        [要求]
        1. 回答必须基于提供的上下文，不要编造信息
        2. 涉及财务数据时，必须注明数据来源的页码或章节
        3. 使用专业术语但保持解释清晰
        4. 比较数据时，提供同比或环比分析
        5. 识别潜在风险点并标注

        上下文:
        {context}

        对话历史:
        {chat_history}

        用户问题: {question}

        专业分析:
        """
        return PromptTemplate(
            input_variables=["context", "chat_history", "question"],
            template=template
        )

    def load_document(self, file_path: str) -> List[Document]:
        """Load a local document into langchain Documents.

        Args:
            file_path: path to a .txt/.pdf/.docx/.md file.

        Returns:
            The loaded Documents (one per page for PDFs).

        Raises:
            ValueError: unsupported extension.
            Exception: any loader failure is logged and re-raised.
        """
        try:
            # Validate the extension against the configured whitelist first.
            ext = os.path.splitext(file_path)[1].lower()
            allowed_ext = self.config['text_processing']['allowed_extensions']
            if ext not in allowed_ext:
                raise ValueError(f"不支持的文件类型: {ext}。支持的类型: {', '.join(allowed_ext)}")

            # Pick and instantiate the matching loader class.
            loader_class = self._get_loader_for_file(file_path)
            loader = loader_class(file_path)

            # Load the document contents.
            documents = loader.load()
            self.logger.info(f"成功加载文档: {file_path}, 页数: {len(documents)}")
            return documents
        except Exception as e:
            self.logger.error(f"文档加载失败: {str(e)}", exc_info=True)
            raise

    def load_web_content(self, url: str) -> List[Document]:
        """Load the content of a web page into Documents (re-raises on failure)."""
        try:
            loader = WebBaseLoader(url)
            documents = loader.load()
            self.logger.info(f"成功加载网页内容: {url}, 内容块数: {len(documents)}")
            return documents
        except Exception as e:
            self.logger.error(f"网页内容加载失败: {str(e)}", exc_info=True)
            raise

    def process_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into overlapping chunks using the configured splitter."""
        try:
            chunks = self.text_splitter.split_documents(documents)
            self.logger.info(f"分割为 {len(chunks)} 个文本块")
            return chunks
        except Exception as e:
            self.logger.error(f"文档处理失败: {str(e)}", exc_info=True)
            raise

    def create_vector_store(self, documents: List[Document], store_name: str = None):
        """Embed documents into a new FAISS store and persist it to disk.

        Args:
            documents: pre-split Documents to index.
            store_name: basename for the saved store; defaults to
                "default_vector_store".

        Returns:
            (success: bool, message: str) — errors are reported, not raised.
        """
        try:
            if not store_name:
                store_name = "default_vector_store"

            # Build the index (calls the embeddings API for every chunk).
            self.vector_store = FAISS.from_documents(documents, self.embeddings)
            self.logger.info("向量存储创建成功")

            # Persist under <vector_stores>/<store_name>.faiss (a directory).
            store_path = os.path.join(self.config['paths']['vector_stores'], f"{store_name}.faiss")
            self.vector_store.save_local(store_path)
            self.logger.info(f"向量存储已保存到: {store_path}")

            return True, f"向量存储创建成功: {store_name}"
        except Exception as e:
            self.logger.error(f"向量存储创建失败: {str(e)}", exc_info=True)
            return False, f"向量存储创建失败: {str(e)}"

    def load_vector_store(self, store_name: str):
        """Load a previously saved FAISS store from disk.

        SECURITY NOTE(review): allow_dangerous_deserialization=True lets
        FAISS unpickle the stored index — only load stores this process
        created itself; never load untrusted files.

        Returns:
            (success: bool, message: str) — errors are reported, not raised.
        """
        try:
            store_path = os.path.join(self.config['paths']['vector_stores'], f"{store_name}.faiss")
            if not os.path.exists(store_path):
                raise FileNotFoundError(f"向量存储不存在: {store_name}")

            self.vector_store = FAISS.load_local(
                store_path,
                self.embeddings,
                allow_dangerous_deserialization=True
            )
            self.logger.info(f"向量存储加载成功: {store_name}")
            return True, f"向量存储加载成功: {store_name}"
        except Exception as e:
            self.logger.error(f"向量存储加载失败: {str(e)}", exc_info=True)
            return False, f"向量存储加载失败: {str(e)}"

    def init_qa_chain(self, prompt_type: str = "default"):
        """Build the ConversationalRetrievalChain over the current vector store.

        Args:
            prompt_type: "financial_analysis" for the analyst prompt,
                anything else for the default prompt.

        Returns:
            (success: bool, message: str) — errors are reported, not raised.
        """
        try:
            if not self.vector_store:
                raise ValueError("请先创建或加载向量存储")

            # Pick the prompt template by type.
            if prompt_type == "financial_analysis":
                prompt = self._create_financial_analysis_prompt()
            else:
                prompt = self._create_custom_prompt()

            # k=4: the retriever feeds the top-4 chunks into the prompt.
            self.qa_chain = ConversationalRetrievalChain.from_llm(
                self.llm,
                self.vector_store.as_retriever(search_kwargs={"k": 4}),
                memory=self.memory,
                combine_docs_chain_kwargs={"prompt": prompt},
                return_source_documents=True
            )
            self.logger.info("问答链初始化完成")
            return True, "问答链初始化完成"
        except Exception as e:
            self.logger.error(f"问答链初始化失败: {str(e)}", exc_info=True)
            return False, f"问答链初始化失败: {str(e)}"

    def ask_question(self, question: str) -> Tuple[str, List[Document]]:
        """Ask the retrieval QA chain a question.

        Returns:
            (answer, source_documents). On failure the answer is an error
            string and the source list is empty — no exception escapes.
        """
        try:
            if not self.qa_chain:
                raise ValueError("请先初始化问答链")

            # Run the chain; memory supplies the chat history.
            result = self.qa_chain({"question": question})
            answer = result['answer']
            source_docs = result['source_documents']

            # Mirror the exchange into the serializable history.
            self.conversation_history.append({"role": "user", "content": question})
            self.conversation_history.append({"role": "assistant", "content": answer})

            # Cap at max_history exchanges (2 entries per exchange).
            max_history = self.config['system']['max_history']
            if len(self.conversation_history) > max_history * 2:
                self.conversation_history = self.conversation_history[-max_history * 2:]

            self.logger.info(f"问题: {question}")
            self.logger.info(f"回答: {answer[:100]}...")

            return answer, source_docs
        except Exception as e:
            self.logger.error(f"提问失败: {str(e)}", exc_info=True)
            return f"提问失败: {str(e)}", []

    def direct_chat(self, message: str, stream: bool = False) -> str:
        """Chat with the LLM directly, without document retrieval.

        Note: the `stream` flag is currently unused.

        Returns:
            The model's reply, or an error string on failure.
        """
        try:
            # Record the user turn before calling the model.
            self.conversation_history.append({"role": "user", "content": message})

            # Cap at max_history exchanges (2 entries per exchange).
            max_history = self.config['system']['max_history']
            if len(self.conversation_history) > max_history * 2:
                self.conversation_history = self.conversation_history[-max_history * 2:]

            # Prepend the system prompt to the running history.
            messages = [
                           {"role": "system", "content": self.config['system']['prompt']}
                       ] + self.conversation_history

            # NOTE(review): predict_messages typically expects BaseMessage
            # objects, not plain dicts — confirm this langchain version
            # accepts dict-style messages.
            response = self.llm.predict_messages(messages)
            answer = response.content

            # Record the assistant turn.
            self.conversation_history.append({"role": "assistant", "content": answer})

            self.logger.info(f"直接对话 - 问题: {message}")
            self.logger.info(f"直接对话 - 回答: {answer[:100]}...")

            return answer
        except Exception as e:
            self.logger.error(f"直接对话失败: {str(e)}", exc_info=True)
            return f"直接对话失败: {str(e)}"

    def save_conversation(self, file_name: str = None) -> Tuple[bool, str]:
        """Save the conversation history as JSON in the conversations dir.

        NOTE(review): the default filename is derived from the current
        history length, so repeated saves can silently overwrite each other
        — pass an explicit file_name to avoid collisions.

        Returns:
            (success: bool, message: str) — errors are reported, not raised.
        """
        try:
            if not file_name:
                file_name = f"conversation_{len(self.conversation_history)}.json"

            file_path = os.path.join(self.config['paths']['conversations'], file_name)

            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(self.conversation_history, f, ensure_ascii=False, indent=2)

            self.logger.info(f"对话已保存到: {file_path}")
            return True, f"对话已保存到: {file_path}"
        except Exception as e:
            self.logger.error(f"保存失败: {str(e)}", exc_info=True)
            return False, f"保存失败: {str(e)}"

    def load_conversation(self, file_name: str) -> Tuple[bool, str]:
        """Replace the in-memory conversation history with one loaded from JSON.

        Note: this does not update the langchain memory, only the
        serializable history list.

        Returns:
            (success: bool, message: str) — errors are reported, not raised.
        """
        try:
            file_path = os.path.join(self.config['paths']['conversations'], file_name)
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"对话文件不存在: {file_name}")

            with open(file_path, 'r', encoding='utf-8') as f:
                self.conversation_history = json.load(f)

            self.logger.info(f"对话历史已加载: {file_name}")
            return True, f"对话历史已加载: {file_name}"
        except Exception as e:
            self.logger.error(f"加载失败: {str(e)}", exc_info=True)
            return False, f"加载失败: {str(e)}"

    def clear_conversation(self):
        """Clear both the serializable history and the chain's memory."""
        self.conversation_history = []
        self.memory.clear()
        self.logger.info("对话历史已清空")

    def list_vector_stores(self) -> List[str]:
        """List saved vector store names (``.faiss`` files, suffix stripped).

        NOTE(review): save_local writes a *directory*; this only matches
        plain files — confirm it finds stores saved by create_vector_store.
        """
        vector_dir = self.config['paths']['vector_stores']
        return [f.replace(".faiss", "") for f in os.listdir(vector_dir)
                if f.endswith(".faiss") and os.path.isfile(os.path.join(vector_dir, f))]

    def get_document_insights(self, question: str) -> Dict:
        """Ask the QA chain and return the answer together with its sources.

        Returns:
            On success: {"question", "answer", "sources"} where each source
            is {"content": full chunk text, "metadata": loader metadata}.
            On failure: {"error": message} — callers must check for it.
        """
        try:
            if not self.qa_chain:
                raise ValueError("请先初始化问答链")

            result = self.qa_chain({"question": question})
            answer = result['answer']
            source_docs = result['source_documents']

            # Extract the full content and metadata of each source chunk.
            sources = []
            for doc in source_docs:
                source_info = {
                    "content": doc.page_content,
                    "metadata": doc.metadata
                }
                sources.append(source_info)

            # Mirror the exchange into the serializable history (note: no
            # max_history trimming here, unlike ask_question).
            self.conversation_history.append({"role": "user", "content": question})
            self.conversation_history.append({"role": "assistant", "content": answer})

            return {
                "question": question,
                "answer": answer,
                "sources": sources
            }
        except Exception as e:
            self.logger.error(f"获取文档洞察失败: {str(e)}", exc_info=True)
            return {
                "error": f"获取文档洞察失败: {str(e)}"
            }

# Usage example: index one annual report, then query it with sources.
if __name__ == "__main__":
    assistant = FinancialQAAssistant()

    # Build and persist a vector store from a local annual-report PDF.
    try:
        raw_docs = assistant.load_document(
            "/Users/simon/Code/Git/financial-large-model/data/annual report/pdf/2018_000156_华数传媒.pdf"
        )
        doc_chunks = assistant.process_documents(raw_docs)
        assistant.create_vector_store(doc_chunks, "2018_000156_华数传媒financial_report")
    except Exception as e:
        print(f"文档处理失败: {str(e)}")

    # Reload the persisted store and wire up the analyst-style QA chain.
    assistant.load_vector_store("2018_000156_华数传媒financial_report")
    assistant.init_qa_chain(prompt_type="financial_analysis")

    # Ask for the MD&A section and show the answer plus its top-2 sources.
    insight = assistant.get_document_insights("输出企业年度报告中的管理层讨论与分析部分完整内容？")
    print("\n文档洞察:")
    print(f"问题: {insight['question']}")
    print(f"回答: {insight['answer']}")
    print("来源:")
    source_docs = []
    for i, source in enumerate(insight['sources'][:2]):
        print(f"  {i + 1}. 页码: {source['metadata'].get('page', 'N/A')}")
        print(f"     内容摘要: {source['content']}")
        source_docs.append(source['content'])
    print(source_docs)

    # Persist the session transcript.
    assistant.save_conversation("financial_analysis_conversation.json")

    # TODO: extract numeric risk keywords