# vector_db_utils.py

from openai import OpenAI
from config import Config
from vanna.openai.openai_chat import OpenAI_Chat
from vanna.chromadb import ChromaDB_VectorStore
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from vanna.base import VannaBase
from chromadb.utils import embedding_functions
import re
import uuid

def deterministic_uuid(text: str) -> str:
    """Return a stable UUIDv5 (DNS namespace) string derived from *text*.

    The same input always maps to the same UUID, which makes the result
    usable as a content-addressed document id.
    """
    stable_id = uuid.uuid5(uuid.NAMESPACE_DNS, text)
    return str(stable_id)

class MyVanna(ChromaDB_VectorStore, OpenAI_Chat, VannaBase):
    """Vanna implementation backed by a local ChromaDB vector store and an
    OpenAI-compatible chat endpoint (SiliconFlow / vLLM / FastChat, ...).

    Embeddings are computed locally through a SentenceTransformer model;
    LLM calls go through the OpenAI SDK pointed at a configurable base URL.
    """

    def __init__(self, config=None):
        """Initialize the vector store and the OpenAI-compatible client.

        Args:
            config (dict | None): Optional overrides for
                ``embedding_model_path``, ``llm_api_key``, ``llm_base_url``
                and ``llm_model_name``. Missing keys fall back to the
                project-wide ``Config`` defaults.
        """
        config = config or {}

        # Local embedding model used by ChromaDB for every collection.
        self.embedding_model_path = config.get("embedding_model_path", Config.EMBED_MODEL_PATH)
        config["embedding_function"] = embedding_functions.SentenceTransformerEmbeddingFunction(
            model_name=self.embedding_model_path
        )

        # Initialize the vector store (opens/creates the Chroma collections).
        ChromaDB_VectorStore.__init__(self, config=config)

        # LLM connection settings; explicit config wins over Config defaults.
        self.llm_api_key = config.get("llm_api_key", Config.LLM_API_KEY)
        self.llm_base_url = config.get("llm_base_url", Config.LLM_BASE_URL)
        self.llm_model_name = config.get("llm_model_name", Config.LLM_MODEL_NAME)

        # OpenAI-compatible client; base_url may point at any server that
        # speaks the OpenAI chat-completions protocol.
        self.client = OpenAI(
            api_key=self.llm_api_key,
            base_url=self.llm_base_url
        )

    def submit_prompt(self, prompt, **kwargs) -> str:
        """Submit a chat prompt via the OpenAI SDK.

        Compatible with SiliconFlow / vLLM / FastChat style endpoints.

        Args:
            prompt: OpenAI-style message list
                (``[{"role": ..., "content": ...}, ...]``).
            **kwargs: ``temperature`` (default 0.7), ``max_tokens``
                (default 512) and ``stream`` (default False).

        Returns:
            str: The full model response text. When streaming, chunks are
            also echoed to stdout as they arrive.

        Raises:
            RuntimeError: If the underlying API call fails (the original
                exception is chained as ``__cause__``).
        """
        stream = kwargs.get("stream", False)
        try:
            response = self.client.chat.completions.create(
                model=self.llm_model_name,
                messages=prompt,
                temperature=kwargs.get("temperature", 0.7),
                max_tokens=kwargs.get("max_tokens", 512),
                stream=stream
            )

            if stream:
                # Accumulate streamed deltas while echoing them live.
                full_response = ""
                for chunk in response:
                    if chunk.choices and chunk.choices[0].delta.content:
                        content = chunk.choices[0].delta.content
                        full_response += content
                        print(content, end="", flush=True)
                return full_response
            else:
                return response.choices[0].message.content

        except Exception as e:
            # Chain the original exception so the root cause stays visible
            # in the traceback.
            raise RuntimeError(f"LLM request failed: {e}") from e

    def ask_with_rag(self, query: str, top_k: int = 10, **kwargs) -> str:
        """Run retrieval-augmented generation: retrieve related docs from the
        vector store, build a prompt around them, and query the LLM.

        Args:
            query (str): The user question.
            top_k (int): Number of documentation snippets to retrieve.
            **kwargs: Forwarded to :meth:`submit_prompt`.

        Returns:
            str: The LLM-generated answer.
        """
        # Vanna reads this attribute to size the documentation retrieval.
        self.n_results_documentation = top_k

        # Retrieve related documentation snippets.
        related_docs = self.get_related_documentation(query)

        # Assemble the RAG prompt (user-facing text stays in Chinese).
        doc_context = "\n".join(related_docs) if related_docs else "无文档信息"

        prompt_text = f"""你是一个知识问答助手，请根据以下知识回答用户问题。

    【相关文档说明】
    {doc_context}

    【用户问题】
    {query}

    请结合以上信息，进行专业、清晰的中文回答"""

        # Wrap in OpenAI-style chat messages.
        messages = [
            {"role": "system", "content": "你是一个知识问答的专家。"},
            {"role": "user", "content": prompt_text}
        ]
        # NOTE(review): debug output of the retrieved context; consider
        # switching to logging before production use.
        print(doc_context)

        return self.submit_prompt(messages, **kwargs)

    def add_doc(self, document: str, metadata: dict = None) -> str:
        """Add a single document to the documentation collection.

        The id is derived deterministically from the document text, so
        re-adding identical text overwrites rather than duplicates.

        Args:
            document (str): The document text to embed and store.
            metadata (dict | None): Optional metadata attached to the entry.

        Returns:
            str: The deterministic document id.
        """
        doc_id = deterministic_uuid(document)
        self.documentation_collection.add(
            documents=[document],
            embeddings=self.generate_embedding(document),
            ids=[doc_id],
            metadatas=[metadata or {}]
        )
        return doc_id

    def add_ddl_details(self, ddl: str):
        """Explode a CREATE TABLE statement into per-column documents and
        store each one in the vector store with structured metadata."""
        table_name = self._extract_table_name(ddl)
        columns = self._extract_columns_from_ddl(ddl)

        for col in columns:
            doc = f"表 `{table_name}` 中字段 `{col['name']}` 类型为 {col['type']}，含义：{col['comment']}。"
            metadata = {
                "table": table_name,
                "column": col["name"],
                "type": "column",
                "comment": col["comment"]
            }
            self.add_doc(document=doc, metadata=metadata)

    def _extract_table_name(self, ddl: str) -> str:
        """Return the table name from a CREATE TABLE statement, or
        ``"unknown_table"`` when it cannot be found."""
        match = re.search(r'CREATE\s+TABLE\s+`?(\w+)`?', ddl, re.IGNORECASE)
        return match.group(1) if match else "unknown_table"

    @staticmethod
    def _split_top_level(text: str) -> list:
        """Split *text* on commas that sit at parenthesis depth 0.

        This keeps parenthesized type arguments such as ``DECIMAL(10,2)``
        or ``ENUM('a','b')`` intact, unlike a plain ``str.split(',')``.
        """
        parts = []
        depth = 0
        current = []
        for ch in text:
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            if ch == ',' and depth == 0:
                parts.append(''.join(current))
                current = []
            else:
                current.append(ch)
        if current:
            parts.append(''.join(current))
        return parts

    def _extract_columns_from_ddl(self, ddl: str) -> list:
        """Parse the column definitions out of a CREATE TABLE statement.

        Returns:
            list[dict]: One ``{"name", "type", "comment"}`` dict per column;
            constraint lines (PRIMARY KEY, FOREIGN KEY, ...) are skipped.
        """
        columns = []

        # Extract the parenthesized column-definition section.
        bracket_content_match = re.search(r'\((.*)\)', ddl, re.DOTALL)
        if not bracket_content_match:
            return columns  # No parenthesized body found.

        columns_part = bracket_content_match.group(1)

        # BUGFIX: split only on top-level commas so types with parenthesized
        # arguments (e.g. DECIMAL(10,2)) are not cut in half.
        lines = [line.strip() for line in self._split_top_level(columns_part) if line.strip()]

        # Column-definition pattern: name, type (optionally parenthesized),
        # and an optional COMMENT '...' clause.
        column_pattern = re.compile(
            r'^`?(\w+)`?\s+([^\s]+(?:\([^\)]*\))?)(?:\s+COMMENT\s+\'([^\']*)\')?', re.IGNORECASE
        )

        # Lines beginning with these keywords are constraints, not columns.
        exclude_keywords = ['PRIMARY', 'UNIQUE', 'CONSTRAINT', 'FOREIGN', 'KEY', 'INDEX', 'CHECK']

        for line in lines:
            if any(line.upper().startswith(kw) for kw in exclude_keywords):
                continue

            m = column_pattern.match(line)
            if m:
                name, col_type, comment = m.groups()
                columns.append({
                    "name": name,
                    "type": col_type,
                    "comment": comment or ""
                })

        return columns

    def add_subject_hierarchy_docs(self, df):
        """Store a subject hierarchy table (with a ``full_path`` column) in
        the vector database, one document per row.

        Args:
            df: pandas DataFrame with columns ``level``, ``level_1``,
                ``level_2``, ``level_3`` and ``full_path``.
                NOTE(review): ``level`` is compared with plain ints — assumes
                it holds integer-like values; verify upstream.
        """
        for _, row in df.iterrows():
            level = row.get("level")
            level_1 = row.get("level_1", "")
            level_2 = row.get("level_2", "")
            level_3 = row.get("level_3", "")
            full_path = row.get("full_path", "")

            # Build a Chinese-language description matching the row's depth.
            doc = f"完整路径为：{full_path}"
            if level == 1:
                doc += f"。一级科目为：{level_1}"
            elif level == 2:
                doc += f"。一级科目为：{level_1}，二级科目为：{level_2}"
            elif level == 3:
                doc += f"。一级科目为：{level_1}，二级科目为：{level_2}，三级科目为：{level_3}"

            metadata = {
                "type": "subject-hierarchy",
                "level": level,
                "level_1": level_1,
                "level_2": level_2,
                "level_3": level_3,
                "full_path": full_path
            }

            self.add_doc(doc, metadata)

    def generate_sql_(self, question: str, allow_llm_to_see_data=False, top_k=10, **kwargs) -> str:
        """Generate SQL for *question* via Vanna's RAG pipeline, augmented
        with hard-coded business rules for the subject-hierarchy tables.

        Args:
            question (str): Natural-language question.
            allow_llm_to_see_data (bool): Permit a second LLM round that
                includes the result of an intermediate query.
            top_k (int): Number of documentation snippets to retrieve.
            **kwargs: Forwarded to the retrieval and LLM calls.

        Returns:
            str: The cleaned SQL statement (or a Chinese-language message
            when data access is required but not allowed, or an error
            string if the intermediate SQL fails).
        """
        if self.config is not None:
            initial_prompt = self.config.get("initial_prompt", None)
        else:
            initial_prompt = None

        # Collect retrieval context: similar Q/SQL pairs, DDL, and docs.
        question_sql_list = self.get_similar_question_sql(question, **kwargs)
        ddl_list = self.get_related_ddl(question, **kwargs)
        self.n_results_documentation = top_k
        doc_list = self.get_related_documentation(question, **kwargs)

        # Business rules injected into the prompt (could be made per-table).
        context_rules = """
【SQL 构造规则】
- 优先使用 subject_name = 'xxx' 精确匹配科目名称；
- 若问题中提到“一级科目/二级科目/三级科目”，请匹配对应 level_n；
    - 如“二级科目 建筑安装工程费”，应匹配 level_2 = '建筑安装工程费'
- 若问题还提到“有哪些内容”、“下属科目”等，应进一步查它的下一层级：
    - 即 level_2 = 'xxx' 且 level = 3，代表它的下属是 level_3
- 默认不进行聚合，除非问题中包含“总和”“合计”“汇总”等字样；
- 示例层级结构：level_0 为“项目成本”，level_1 为一级科目，level_2 为二级，level_3 为三级；
- 查询某科目的某项值，如“开发成本的目标成本”，应直接写：
  WHERE subject_name = '开发成本'
- 切勿仅使用 level_1 = 'xxx' 等条件判断科目；
"""

        # Build the multi-part SQL prompt.
        prompt = self.get_sql_prompt(
            initial_prompt=initial_prompt,
            question=question,
            question_sql_list=question_sql_list,
            ddl_list=ddl_list,
            doc_list=doc_list + [context_rules],
            **kwargs,
        )

        self.log(title="SQL Prompt", message=prompt)
        llm_response = self.submit_prompt(prompt, **kwargs)
        self.log(title="LLM Response", message=llm_response)

        # If the model asked for an intermediate query and introspection is
        # allowed, run it and feed the result back for a final round.
        if 'intermediate_sql' in llm_response:
            if not allow_llm_to_see_data:
                return "LLM需要访问数据以完成 SQL 构造，请设置 allow_llm_to_see_data=True 以允许。"

            intermediate_sql = self.extract_sql(llm_response)
            try:
                self.log(title="Running Intermediate SQL", message=intermediate_sql)
                df = self.run_sql(intermediate_sql)

                # Feed the intermediate result back as extra documentation.
                prompt = self.get_sql_prompt(
                    initial_prompt=initial_prompt,
                    question=question,
                    question_sql_list=question_sql_list,
                    ddl_list=ddl_list,
                    doc_list=doc_list + [context_rules, f"中间查询结果如下:\n{df.to_markdown()}"],
                    **kwargs,
                )
                self.log(title="Final SQL Prompt", message=prompt)
                llm_response = self.submit_prompt(prompt, **kwargs)
                self.log(title="LLM Final Response", message=llm_response)
            except Exception as e:
                return f"Error running intermediate SQL: {e}"

        # Final cleanup: return only the SQL statement itself.
        return self.extract_sql(llm_response).strip()


vn = MyVanna()


