#from vanna.chromadb.chromadb_vector import ChromaDB_VectorStore
from vanna.milvus import Milvus_VectorStore

from vanna.openai.openai_chat import OpenAI_Chat
import re
import json
from ..utils.filter_manager import FilterConfigManager, build_filter_condition_prompt

class Telchina_Chat(OpenAI_Chat):
  """OpenAI-backed chat customized for the Telchina platform.

  Extends vanna's OpenAI_Chat with page routing (generate_router),
  Plotly code generation, and a SQL-generation pipeline that adds a
  table-analysis pass plus dynamically built filter conditions.
  """
  def __init__(self, client=None, config=None):
    super().__init__(client, config)
    # Supplies the dynamic filter rules consumed by _build_filter_conditions.
    self.filter_manager = FilterConfigManager()

  def generate_router(self, question: str, page: str, history: list = None, **kwargs) -> str:
    """Ask the LLM to pick up to 3 menu entries matching *question*.

    The LLM is given the available menu metadata in *page* and must answer
    with a JSON array of {path, title, url} objects (empty array if none
    match). Returns the raw LLM response string.
    """
    # Prefer a router prompt supplied via config; otherwise use the default.
    router_prompt = None
    if self.config is not None:
      router_prompt = self.config.get("initial_prompt_router", None)

    if router_prompt is None:
      router_prompt = f"""
      你是页面跳转专家，目前系统中包含以下菜单，请从下面菜单的title中找出与用户问题匹配度最高的3个菜单并返回json数组数据，不要进行其他解释，没找到就回复空json数组！！！
      回复的json格式：[{{"path":"路径","title":"标题","url":"地址"}}]

      现有页面地址信息如下：
       {page}

       用户提问越具体，可以通过完全匹配或者部分匹配关键词来定位最相关的菜单。
       如果问题是广泛的或不完全明确，可以考虑模糊匹配关键词（例如，"能耗"与"能耗统计"），或者基于上下文提供几种相关菜单选项供用户选择。
     """

    # Build the conversation: system prompt followed by the user's question.
    messages = [
      self.system_message(router_prompt),
      self.user_message(question),
    ]
    self.log(messages)
    reply = self.submit_prompt(messages, **kwargs)
    self.log(reply)
    return reply

  def generate_plotly_code(self, question: str = None, sql: str = None, df_metadata: str = None,**kwargs) -> str:
    """Generate Python Plotly code for visualizing a query result.

    Args:
        question: the user's question; when given, the richer analysis
            prompt (including *sql*) is used.
        sql: the SQL that produced the data, shown to the LLM for context.
        df_metadata: DataFrame description used when no question is given.
        **kwargs: extra options forwarded to submit_prompt.

    Returns:
        str: sanitized Python code that builds a Plotly figure from ``df``
        (the LLM may instead return a message if the data cannot be
        visualized).
    """
    if question is not None:
      system_msg = f"""
      你是一位专业的数据可视化分析师。请按照以下步骤分析数据并生成高质量的Plotly可视化代码。

      ## 分析目标
      用户问题："{question}"
      SQL查询：{sql if sql else '未提供'}

      ## 数据信息
      保存在df变量结果中

      ## 图表类型选择指南
      请根据数据特征选择最适合的图表类型：

      ### 数值分析类
      - **趋势分析**：时间序列数据 → px.line (折线图)
      - **分布分析**：连续数值 → px.histogram (直方图) 或 px.box (箱线图)
      - **关系分析**：两个数值变量 → px.scatter (散点图)

      ### 分类比较类
      - **分类对比**：分类vs数值 → px.bar (柱状图) 或 px.barh (水平柱状图)
      - **占比分析**：分类占比 → px.pie (饼图) 或 px.sunburst (旭日图)

      ### 特殊情况
      - **单一指标**：仅一个数值 → go.Indicator (指示器)

      ## 数据预处理要求
      1. **数据量检查**：超过1000行建议采样显示
      2. **异常值检查**：识别并处理极端值
      3. **分类数据**：限制显示类别数量（建议≤15个）
      4. **时间数据**：确保正确的时间格式转换

      ## 代码质量标准
      1. **导入语句**：明确导入 plotly.express 或 plotly.graph_objects
      2. **标题设置**：添加清晰的图表标题
      3. **轴标签**：设置有意义的x轴和y轴标签
      4. **颜色主题**：使用合适的颜色方案
      5. **布局优化**：设置合适的图表尺寸和边距
      6. **交互性**：保留Plotly的交互功能

      ## 错误处理
      - 空数据：返回"数据为空，无法生成可视化"
      - 不适合可视化：说明具体原因
      - 数据类型不匹配：提供数据转换建议
      """
    else:
      system_msg = f"""
      你是一位专业的数据可视化分析师。请分析以下DataFrame数据并生成合适的Plotly可视化代码。

      ## 数据信息
      {df_metadata}

      请根据数据特征选择最适合的图表类型并生成高质量的可视化代码。
      """

    user_msg = """
    请基于上述数据信息生成Python Plotly可视化代码。

    ## 输出要求

    ### 1. 数据适用性判断
    首先判断数据是否适合进行可视化：
    - 如果数据为空或结构不合理，返回："数据不适合可视化：[具体原因]"
    - 如果适合可视化，继续下一步

    ### 2. 图表类型选择
    根据数据特征自动选择最佳图表类型，并在注释中简要说明选择理由

    ### 3. 代码生成标准
    生成的代码必须满足：
    ```python
    # 导入必要的库
    import plotly.express as px
    import plotly.graph_objects as go

    # 数据预处理（如需要）
    # df 已经包含查询结果
    # df = df.head(1000)  # 限制数据量

    # 生成图表示例代码
    fig = px.图表类型(df,
                    x='x列名',
                    y='y列名',
                    title='清晰的图表标题',
                    labels={'x列名': 'X轴标签', 'y列名': 'Y轴标签'},
                   )

    # 生成列表示例代码
    columns = list(df.columns)
    cell_values = []
    for col in columns:
        cell_values.append(df[col])

    fig = go.Figure(data=[go.Table(
        header=dict(values=columns,
                    fill_color='paleturquoise',
                    align='left'),
        cells=dict(values=cell_values,
                  fill_color='lavender',
                  align='left'))])
                  
    # 显示图表
    fig.show()
    ```
    ### 4. 特殊情况处理
    - 单一数值 ：使用 go.Indicator
    - 大数据集 ：添加采样代码
    - 时间序列 ：确保时间轴格式正确
    - 分类过多 ：只显示前N个类别
    ### 5. 输出格式
    - 只返回可执行的Python代码
    - 如果不适合可视化，只返回说明文字
    - 不要添加额外的解释或说明
    请现在开始分析并生成代码。
    """


    message_log = [
      self.system_message(system_msg),
      self.user_message(user_msg),
    ]

    # BUG FIX: was submit_prompt(message_log, kwargs=kwargs), which packed the
    # caller's options under a literal "kwargs" key and silently dropped them.
    # Forward them properly, consistent with the other submit_prompt calls in
    # this class (see generate_router / generate_sql).
    plotly_code = self.submit_prompt(message_log, **kwargs)

    return self._sanitize_plotly_code(self._extract_python_code(plotly_code))


  def generate_sql(self, question: str, allow_llm_to_see_data=False, **kwargs) -> str:
    """
    Pre-process the question before delegating SQL generation to the parent.

    Two preparation steps run here: an optional table-analysis LLM pass
    (enabled by the ``sql_analysis_prompt`` config key) whose discovered
    tables supply extra DDL, and the dynamic filter-condition prompt that
    is prepended to the question.

    Args:
        question (str): the question to generate SQL for
        allow_llm_to_see_data (bool): whether the LLM may inspect database data
        **kwargs: extra parameters, forwarded to the parent implementation
    Returns:
        str: the SQL query answering the question
    """
    # BUG FIX: initial_prompt was left unbound when self.config is None,
    # raising NameError at the check below. Default it to None.
    initial_prompt = None
    if self.config is not None:
      initial_prompt = self.config.get("sql_analysis_prompt", None)

    if initial_prompt is not None:
      question_sql_list = self.get_similar_question_sql(question, **kwargs)
      ddl_list = self.get_related_ddl(question, **kwargs)
      doc_list = self.get_related_documentation(question, **kwargs)

      prompt = self.get_sql_analysis_prompt(
        initial_prompt=initial_prompt,
        question=question,
        question_sql_list=question_sql_list,
        ddl_list=ddl_list,
        doc_list=doc_list,
        **kwargs,
      )

      self.log(title="SQL Analysis Prompt", message=prompt)
      llm_response = self.submit_prompt(prompt, **kwargs)
      self.log(title="LLM Analysis Response", message=llm_response)
      table_list = self.extract_table_names(llm_response)
      self.log(title="提取的表信息：", message=table_list)
      # Fetch additional DDL for every table surfaced by the analysis pass
      # and hand it to get_sql_prompt through kwargs.
      supplement_ddl_list = []
      for table_str in table_list:
        temp_ddl_list = self.get_related_ddl(table_str, **kwargs)
        supplement_ddl_list.extend(temp_ddl_list)
      kwargs["supplement_ddl_list"] = supplement_ddl_list
      kwargs["sql_analysis_result"] = llm_response

    # Build the dynamic filter conditions and prepend the resulting
    # instructions to the question.
    filter_conditions_json = self._build_filter_conditions(**kwargs)
    question_addition = build_filter_condition_prompt(filter_conditions_json)
    question = question_addition + "\n 问题：" + question

    sql = super().generate_sql(question, allow_llm_to_see_data, **kwargs)
    return sql

  def _build_filter_conditions(self, **kwargs) -> str:
    """
    Build the dynamic filter-condition JSON.

    Reads ``filter_conditions`` (a dict of filter name -> value) from kwargs,
    plus a legacy top-level ``organ_ids`` kwarg for backward compatibility,
    and expands each non-null value into SQL condition metadata via the
    filter manager.

    Returns:
        str: pretty-printed JSON of the form
        ``{"filter_conditions": [{"table", "field", "description", "type", "condition"}, ...]}``.
    """
    all_conditions = []

    # FIX: work on a copy so the caller's dict is never mutated, and tolerate
    # an explicit filter_conditions=None (the original crashed on .items()).
    filter_conditions = dict(kwargs.get('filter_conditions') or {})

    # Backward compatibility: honor a bare organ_ids kwarg only when it is
    # not already present in filter_conditions.
    if 'organ_ids' in kwargs and kwargs['organ_ids'] is not None and 'organ_ids' not in filter_conditions:
        filter_conditions['organ_ids'] = kwargs['organ_ids']

    # Expand every non-null filter value into condition metadata.
    for filter_name, filter_value in filter_conditions.items():
        if filter_value is not None:
            rules = self.filter_manager.get_filter_rules([filter_name])
            for rule in rules:
                condition_sql = self.filter_manager.build_condition_sql(rule, filter_value)
                all_conditions.append({
                    "table": rule.table,
                    "field": rule.field,
                    "description": rule.description,
                    "type": rule.field_type.value,
                    "condition": condition_sql
                })

    filter_json = {
        "filter_conditions": all_conditions
    }

    return json.dumps(filter_json, ensure_ascii=False, indent=2)

  def get_sql_analysis_prompt(
      self,
      initial_prompt : str,
      question: str,
      question_sql_list: list,
      ddl_list: list,
      doc_list: list,
      **kwargs,
  ):
      """Assemble the message log for the table-analysis LLM pass.

      The system prompt (default or caller-supplied) is enriched with the
      retrieved DDL and documentation; prior question/SQL pairs are replayed
      as few-shot examples before the actual question.
      """
      if initial_prompt is None:
          initial_prompt = (
              f"你是 {self.dialect} 专家。"
              "分析用户问题中需要的的表信息。 **需要使用的表名列表**：列出查询中将会使用的所有表名，格式为：`schema.table_name`，例如：`sl.l_his_hitch_alarm`, `sl.l_lamppost` 等。确保列出所有表，包括子查询中使用的表。"
          )

      initial_prompt = self.add_ddl_to_prompt(
          initial_prompt, ddl_list, max_tokens=self.max_tokens
      )

      if self.static_documentation != "":
          doc_list.append(self.static_documentation)

      initial_prompt = self.add_documentation_to_prompt(
          initial_prompt, doc_list, max_tokens=self.max_tokens
      )

      messages = [self.system_message(initial_prompt)]

      # Replay prior question/SQL pairs as few-shot examples.
      for example in question_sql_list:
          if example is None:
              print("example is None")
              continue
          if "question" in example and "sql" in example:
              messages.append(self.user_message(example["question"]))
              messages.append(self.assistant_message(example["sql"]))

      messages.append(self.user_message(question))
      return messages

  def get_sql_prompt(
      self,
      initial_prompt : str,
      question: str,
      question_sql_list: list,
      ddl_list: list,
      doc_list: list,
      **kwargs,
  ):
      """
      Build the message log for SQL generation.

      Example:
      ```python
      vn.get_sql_prompt(
          question="What are the top 10 customers by sales?",
          question_sql_list=[{"question": "What are the top 10 customers by sales?", "sql": "SELECT * FROM customers ORDER BY sales DESC LIMIT 10"}],
          ddl_list=["CREATE TABLE customers (id INT, name TEXT, sales DECIMAL)"],
          doc_list=["The customers table contains information about customers and their sales."],
      )
      ```

      Args:
          initial_prompt (str): system prompt override; a default is used when None.
          question (str): The question to generate SQL for.
          question_sql_list (list): A list of questions and their corresponding SQL statements.
          ddl_list (list): A list of DDL statements.
          doc_list (list): A list of documentation.
          **kwargs: may carry ``supplement_ddl_list`` (extra DDL from the
              analysis pass) and ``question_sql_history`` (conversation history).

      Returns:
          any: The prompt (message log) for the LLM to generate SQL.
      """

      if initial_prompt is None:
          initial_prompt = f"You are a {self.dialect} expert. " + \
          "Please help to generate a SQL query to answer the question. Your response should ONLY be based on the given context and follow the response guidelines and format instructions. "

      # Merge the supplementary DDL discovered by the analysis pass with the
      # retrieved DDL.
      supplement_ddl_list = kwargs.get("supplement_ddl_list", [])
      # FIX: de-duplicate with dict.fromkeys instead of set() so the DDL order
      # is deterministic across runs (stable prompts; set iteration order is
      # arbitrary for strings due to hash randomization).
      ddl_list = list(dict.fromkeys(ddl_list + supplement_ddl_list))

      initial_prompt = self.add_ddl_to_prompt(
          initial_prompt, ddl_list, max_tokens=self.max_tokens
      )

      if self.static_documentation != "":
          doc_list.append(self.static_documentation)

      initial_prompt = self.add_documentation_to_prompt(
          initial_prompt, doc_list, max_tokens=self.max_tokens
      )

      initial_prompt += (
          "===Response Guidelines \n"
          "1. If the provided context is sufficient, please generate a valid SQL query without any explanations for the question. \n"
          "2. If the provided context is almost sufficient but requires knowledge of a specific string in a particular column, please generate an intermediate SQL query to find the distinct strings in that column. Prepend the query with a comment saying intermediate_sql \n"
          "3. If the provided context is insufficient, please explain why it can't be generated. \n"
          "4. Please use the most relevant table(s). \n"
          "5. If the question has been asked and answered before, please repeat the answer exactly as it was given before. \n"
          f"6. Ensure that the output SQL is {self.dialect}-compliant and executable, and free of syntax errors. \n"
      )

      message_log = [self.system_message(initial_prompt)]

      # Few-shot reference examples.
      if question_sql_list and any(example is not None for example in question_sql_list):
          message_log.append(self.system_message("以下是相关的参考示例，请参考这些示例的问答模式："))

          for example in question_sql_list:
              if example is None:
                  print("example is None")
              elif "question" in example and "sql" in example:
                  message_log.append(self.user_message(f"[参考示例] {example['question']}"))
                  message_log.append(self.assistant_message(example["sql"]))

      # Conversation history for this session.
      question_sql_history = kwargs.get("question_sql_history", [])
      if question_sql_history:
          message_log.append(self.system_message("以下是本次对话的历史记录，请基于这些上下文继续回答："))

          for item in question_sql_history:
              # Dict-shaped history entries
              if isinstance(item, dict):
                  p_question = item.get("question")
                  p_sql = item.get("sql")
              # Tuple/list-shaped history entries (legacy format)
              elif isinstance(item, (tuple, list)) and len(item) >= 2:
                  p_question, p_sql = item[0], item[1]
              else:
                  continue

              if p_question is not None and p_sql is not None:
                  message_log.append(self.user_message(f"[对话历史] {p_question}"))
                  message_log.append(self.assistant_message(p_sql))

      # The actual question comes last.
      message_log.append(self.system_message("现在请回答以下新问题："))
      message_log.append(self.user_message(question))

      return message_log

  def extract_table_names(self, text):
    """Extract table names mentioned in an LLM analysis response.

    Recognizes three spellings: ``name(描述)`` with a description attached,
    ``name (描述)`` with a space before the description, and schema-qualified
    names such as ``sl.table_name``. Returns a de-duplicated list that
    prefers the described form of each table over the bare name.
    """
    # Collect every candidate, including names carrying a Chinese description
    tables = []
    table_base_names = {}  # bare table name -> full (possibly described) form

    # First, match names with a description attached directly, e.g. l_his_hitch_alarm(单灯报警表)
    table_with_desc_matches = re.findall(r'([\w_]+)\([^\)]+\)', text)
    if table_with_desc_matches:
      for table in table_with_desc_matches:
        # Recover the full spelling (name plus description) from the text
        full_match = re.search(r'(' + re.escape(table) + r'\([^\)]+\))', text)
        if full_match:
          full_table = full_match.group(1)
          tables.append(full_table)
          table_base_names[table] = full_table  # remember bare -> full mapping

    # Next, match names with a space before the description, e.g. p_check_task (巡检任务表)
    table_with_space_desc_matches = re.findall(r'([\w_]+)\s+\([^\)]+\)', text)
    if table_with_space_desc_matches:
      for table in table_with_space_desc_matches:
        # Recover the full spelling (name, space, description) from the text
        full_match = re.search(r'(' + re.escape(table) + r'\s+\([^\)]+\))', text)
        if full_match:
          full_table = full_match.group(1)
          tables.append(full_table)
          table_base_names[table] = full_table  # remember bare -> full mapping

    # Then match schema-qualified patterns like sl.table_name (or table.field_name)
    schema_table_matches = re.findall(r'`?([\w]+)\.([\w_]+)`?', text)
    if schema_table_matches:
      for schema, table in schema_table_matches:
        # Known schemas 'sl' and 'ph': the second component is the table name
        if schema == 'sl' or schema == 'ph':
          # Skip if a described form of this table was already recorded
          if table in table_base_names:
            continue
          # Look for a described form with the description attached
          full_match = re.search(r'(' + re.escape(table) + r'\([^\)]+\))', text)
          if full_match:
            full_table = full_match.group(1)
            tables.append(full_table)
            table_base_names[table] = full_table
          else:
            # Look for a described form with a space before the description
            space_match = re.search(r'(' + re.escape(table) + r'\s+\([^\)]+\))', text)
            if space_match:
              full_table = space_match.group(1)
              tables.append(full_table)
              table_base_names[table] = full_table
            else:
              # No described form found; record the bare table name
              tables.append(table)
              table_base_names[table] = table
        # Unknown schema: treat the first component as the table name itself
        # (assume the second component is a field name)
        else:
          if schema not in table_base_names:  # avoid duplicates
            tables.append(schema)
            table_base_names[schema] = schema

    # De-duplicate, preferring the described form of a table name
    unique_tables = []
    processed_base_names = set()

    for table in tables:
      # Strip any description part to get the bare table name
      base_name = re.match(r'([\w_]+)', table).group(1)

      # First occurrence of this bare name: keep it
      if base_name not in processed_base_names:
        unique_tables.append(table)
        processed_base_names.add(base_name)
      # Already seen, but this occurrence carries a description while the kept
      # entry does not: swap the described form in
      elif '(' in table and not any('(' in t and t.startswith(base_name) for t in unique_tables):
        # Find and replace the previously kept bare form
        for i, t in enumerate(unique_tables):
          if t == base_name or (t.startswith(base_name) and not '(' in t):
            unique_tables[i] = table
            break

    return unique_tables if unique_tables else []


# class Telchina_LocalContext_OpenAI(ChromaDB_VectorStore, Telchina_Chat):
#   def __init__(self, client=None, config=None):
#     ChromaDB_VectorStore.__init__(self, config=config)
#     Telchina_Chat.__init__(self, client, config=config)

class Telchina_LocalContext_OpenAI(Telchina_Chat):
  """Telchina chat without a vector store (replaces the former ChromaDB-backed variant)."""
  def __init__(self, client=None, config=None):
    Telchina_Chat.__init__(self, client, config=config)

class Telchina_LocalContext_OpenAI_Milvus(Milvus_VectorStore, Telchina_Chat):
  """Telchina chat backed by a Milvus vector store.

  Both bases are initialised explicitly because they take different
  constructor arguments (the vector store only needs config).
  """
  def __init__(self, client=None, config=None):
    Milvus_VectorStore.__init__(self, config=config)
    Telchina_Chat.__init__(self, client, config=config)
