import re
from abc import ABC

import pandas as pd
from chromadb import EmbeddingFunction, Documents, Embeddings
from openai import OpenAI
from vanna.base import VannaBase
from vanna.chromadb import ChromaDB_VectorStore
from vanna.openai import OpenAI_Chat

from server.config.models_config import MyXinferenceEmbeddings
from server.settings import Settings
from server.utils.url_util import get_base_url


class MyVannaBase(VannaBase, ABC):
    """Vanna base class that localizes the SQL-generation and follow-up
    question prompts to Chinese.

    Only prompt construction is overridden; retrieval, LLM submission and
    the rest of the pipeline come from ``VannaBase`` / the mixed-in
    vector-store and chat classes.
    """

    def __init__(self, config=None):
        VannaBase.__init__(self, config=config)

    def get_sql_prompt(
            self,
            initial_prompt: str,
            question: str,
            question_sql_list: list,
            ddl_list: list,
            doc_list: list,
            **kwargs,
    ):
        """Build the chat-message log used to ask the LLM for a SQL query.

        Example:
        ```python
        vn.get_sql_prompt(
            question="What are the top 10 customers by sales?",
            question_sql_list=[{"question": "What are the top 10 customers by sales?", "sql": "SELECT * FROM customers ORDER BY sales DESC LIMIT 10"}],
            ddl_list=["CREATE TABLE customers (id INT, name TEXT, sales DECIMAL)"],
            doc_list=["The customers table contains information about customers and their sales."],
        )
        ```

        Args:
            initial_prompt (str): Optional system-prompt prefix; a Chinese
                default is used when ``None``.
            question (str): The question to generate SQL for.
            question_sql_list (list): Prior question/SQL example pairs, used
                as few-shot user/assistant turns.
            ddl_list (list): DDL statements to include as context.
            doc_list (list): Documentation snippets to include as context.

        Returns:
            list: The message log (system + few-shot + user messages) for
            the LLM.
        """

        if initial_prompt is None:
            initial_prompt = f"你是一个{self.dialect}专家。 " + \
                             "请帮助生成 SQL 查询来回答问题。您的回答应仅基于给定的上下文并遵循回复指南和格式说明。"

        initial_prompt = self.add_ddl_to_prompt(
            initial_prompt, ddl_list, max_tokens=self.max_tokens
        )

        # Build a new list instead of appending in place, so the caller's
        # doc_list is not mutated as a side effect of prompt construction.
        if self.static_documentation != "":
            doc_list = doc_list + [self.static_documentation]

        initial_prompt = self.add_documentation_to_prompt(
            initial_prompt, doc_list, max_tokens=self.max_tokens
        )

        initial_prompt += (
            "===回复指南 \n"
            "1. 如果提供的上下文足够，请生成有效的 SQL 查询，但不对问题进行任何解释。 \n"
            "2. 如果提供的上下文几乎足够，但需要了解特定列中的特定字符串，请生成中间 SQL 查询以查找该列中的不同字符串。在查询前面添加注释，说明中间SQL \n"
            "3. 如果提供的上下文不充分，请解释为什么无法生成。 \n"
            "4. 请使用最相关的数据表。 \n"
            "5. 如果该问题之前已经问过并得到回答，请准确重复之前的答案。 \n"
        )

        message_log = [self.system_message(initial_prompt)]

        # Few-shot examples: each well-formed pair becomes a user turn
        # (the question) followed by an assistant turn (the SQL).
        for example in question_sql_list:
            if example is None:
                print("example is None")
            elif "question" in example and "sql" in example:
                message_log.append(self.user_message(example["question"]))
                message_log.append(self.assistant_message(example["sql"]))

        message_log.append(self.user_message(question))

        return message_log

    def generate_followup_questions(
            self, question: str, sql: str, df: pd.DataFrame, n_questions: int = 5, **kwargs
    ) -> list:
        """Generate follow-up questions about the query results.

        **Example:**
        ```python
        vn.generate_followup_questions("What are the top 10 customers by sales?", sql, df)
        ```

        Args:
            question (str): The question that was asked.
            sql (str): The LLM-generated SQL query.
            df (pd.DataFrame): The results of the SQL query.
            n_questions (int): Number of follow-up questions to generate.

        Returns:
            list: Follow-up questions (one per line of the LLM response,
            leading "1." style numbering and blank lines removed).
        """

        message_log = [
            self.system_message(
                f"你是一个乐于助人的数据助手。用户问了这个问题：'{question}'\n\n该问题的SQL查询是：{sql}\n\n以下是包含查询结果的 pandas DataFrame：\n{df.to_markdown()}\n\n"
            ),
            self.user_message(
                f"生成用户可能就此数据询问的 {n_questions} 个后续问题列表。以问题列表的形式进行回复，每行一个。不要回答任何解释 -- 只回答问题。请记住，应该有一个可以从问题生成的明确 SQL 查询。最好选择可以在对话上下文之外回答的问题。最好选择对生成的 SQL 查询进行轻微修改的问题，以便更深入地挖掘数据。每个问题都将变成一个按钮，用户可以单击该按钮来生成新的 SQL 查询，因此不要使用“示例”类型的问题。每个问题都必须与实例化的 SQL 查询一一对应。" +
                self._response_language()
            ),
        ]

        llm_response = self.submit_prompt(message_log, **kwargs)

        # Strip leading "1. " style enumeration the LLM tends to emit, and
        # drop blank lines so callers never receive empty "questions".
        numbers_removed = re.sub(r"^\d+\.\s*", "", llm_response, flags=re.MULTILINE)
        return [line for line in numbers_removed.split("\n") if line.strip()]


class MyVanna(MyVannaBase, ChromaDB_VectorStore, OpenAI_Chat):
    """Concrete Vanna: ChromaDB vector store + OpenAI-compatible chat LLM,
    with the localized prompts from ``MyVannaBase``.

    Args:
        config (dict): Must contain ``'api_key'``, ``'base_url'`` and
            ``'model'``. The declared ``None`` default is not usable and
            raises a clear error instead of an opaque TypeError.
    """

    def __init__(self, config=None):
        if config is None:
            # The original code crashed here with a TypeError on item
            # assignment; fail fast with an actionable message instead.
            raise ValueError(
                "MyVanna requires a config dict with 'api_key', 'base_url' and 'model'"
            )
        # Shallow-copy so injecting the embedding function does not mutate
        # the caller's dict.
        config = dict(config)
        config['embedding_function'] = MyEmbeddingFunction()
        ChromaDB_VectorStore.__init__(self, config=config)
        client = OpenAI(api_key=config['api_key'], base_url=config['base_url'])
        OpenAI_Chat.__init__(self, client=client, config=config)


class MyEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function that delegates to the Xinference embedding
    model configured in the server settings."""

    def __call__(self, texts: Documents) -> Embeddings:
        """Embed each text via the configured Xinference model.

        The underlying client is created lazily on first use (so Settings
        are still read at call time, as before) and then cached, instead of
        being rebuilt for every batch as the original code did.
        NOTE(review): assumes MyXinferenceEmbeddings is safe to reuse
        across calls — confirm it holds no per-request state.
        """
        embedder = getattr(self, "_embedder", None)
        if embedder is None:
            embedder = MyXinferenceEmbeddings(
                server_url=get_base_url(Settings.model_settings.DEFAULT_API_BASE_URL),
                api_key=Settings.model_settings.DEFAULT_API_KEY,
                model_uid=Settings.model_settings.DEFAULT_EMBEDDING_MODEL
            )
            self._embedder = embedder
        return [embedder.embed_query(text) for text in texts]


# Module-level Vanna singleton shared by the server, wired to the default
# model settings for the LLM endpoint.
_vanna_config = {
    'base_url': Settings.model_settings.DEFAULT_API_BASE_URL,
    'api_key': Settings.model_settings.DEFAULT_API_KEY,
    'model': Settings.model_settings.DEFAULT_LLM_MODEL,
}
vn = MyVanna(config=_vanna_config)
