import chromadb
import openai
from sqlalchemy import create_engine, inspect
from sqlalchemy import text
from llm.model_factory import ModelFactory
from services.chroma_service import ChromaService
from services.database_service import DatabaseService
from services.sql_permission_service import SQLPermissionService
import os
import yaml
from typing import Dict, Any, List
from collections import defaultdict
import re
import plotly
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from config import MODEL_NAME, EMBEDDING_MODEL
from utils.tools import _parse_llm_response, extract_sql
from datetime import datetime
from decimal import Decimal
from model_config import DEFAULT_EMBED_PROVIDER, DEFAULT_EMBED_MODEL
from llm.model_service import ModelService
import json
from services.authorization_service import AuthorizationService

class Text2SQLAgent:
    def __init__(self, db_dialect: str = "kylin"):
        """Initialize the Text2SQL agent.

        Args:
            db_dialect: Target SQL dialect for generated queries
                (e.g. "kylin", "oracle", "mysql"); used to choose the
                system prompt in generate_sql.
        """
        self.model_client = ModelFactory.create_model()
        self.chroma = ChromaService()
        self.sql_permission = SQLPermissionService()
        self.model_name = MODEL_NAME
        # NOTE(review): self.embedding_model is never assigned (line below is
        # commented out), but _call_embed dereferences it when called without
        # an explicit model — confirm whether this attribute should exist.
        # self.embedding_model = EMBEDDING_MODEL
        # Chroma collection names used for retrieval.
        self.table_collection_name = "table_descriptions"
        self.code_collection_name = "code_tables"
        self.qa_pairs_collection_name = "qa_pairs"
        self.db_dialect = db_dialect  # database dialect for SQL generation
        self.auth_service = AuthorizationService()

        # NOTE(review): all connection credentials below are hard-coded in
        # source; consider moving them to environment variables or a secured
        # config file before deployment.
        # Metadata database (table/column descriptions) — openGauss.
        self.meta_db_config = {
            'db_type': 'openGauss',
            'host': '10.1.205.5',
            'port': 5432,
            'user': 'dmp_dev',
            'password': 'dmp@1234',
            'database': 'db_dmp'
        }

        # Metrics database — InterSystems IRIS.
        self.metrics_db_config = {
            'db_type': 'iris',
            'host': '111.205.100.105',
            'dbname': 'mdr2025',
            'user': '_system',
            'password': 'dhcc@123',
            'port': 1972
        }

        # Apache Kylin OLAP connection.
        self.kylin_config = {
            'db_type': 'kylin',
            'host': '10.1.200.115',
            'port': 7070,
            'user': 'admin',
            'password': 'KYLIN',
            'project': 'MDRV1'
        }

        # Minimal SQL Server marker config (no connection details).
        self.sqlserver_config = {
            'db_type': 'sqlserver'
        }

        # Data database — SQL Server.
        self.data_db_config = {
            'db_type': 'sqlserver',
            'host': '127.0.0.1',
            'database': 'mdr',
            'user': 'sa',
            'password': '123456',
            'port': 1433
        }

        # Prompt templates loaded from config/prompts.yaml.
        self.prompts = self._load_prompts()

    def _load_prompts(self) -> Dict[str, Dict[str, str]]:
        """Load the prompt template configuration from config/prompts.yaml."""
        path = os.path.join('config', 'prompts.yaml')
        with open(path, 'r', encoding='utf-8') as fh:
            templates = yaml.safe_load(fh)
        return templates

    def _call_embed(self, text, model=None):
        """Return the embedding vector for *text*.

        Args:
            text: Input text to embed.
            model: Kept for backward compatibility; the provider and model
                are currently fixed by DEFAULT_EMBED_PROVIDER /
                DEFAULT_EMBED_MODEL, so this argument is not forwarded.

        Raises:
            Exception: re-raises any error from the embedding service.
        """
        try:
            # Bug fix: `self.embedding_model` is never set in __init__ (the
            # assignment there is commented out), so dereferencing it here
            # raised AttributeError whenever `model` was omitted.  Fall back
            # safely; the value is unused by the call below anyway.
            if model is None:
                model = getattr(self, 'embedding_model', None)
            embeddings = ModelService.call(
                provider_name=DEFAULT_EMBED_PROVIDER,
                model_name=DEFAULT_EMBED_MODEL,
                mode="embed", stream=False, text=text)
            return embeddings[0]
        except Exception as e:
            print(f"Error getting embedding: {str(e)}")
            raise

    def _call_llm(self, system_prompt: str, user_prompt: str) -> str:
        """Send a system/user prompt pair to the chat model and return its reply."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt + " /no_think"},
        ]
        return self.model_client.chat(
            messages=messages,
            model=self.model_name,
            temperature=0,
        )

    def get_relevant_tables(self, user_query):
        results = self.chroma.list_records(
                    collection_name=self.table_collection_name,
                    embedding_model=None,
                    page=1,
                    page_size=30,
                    search_type='hybrid',
                    record_id=None,
                    query_text=user_query,
                    metadata_filter=None,
                    document_filter=None,
                    n_results_requested=30
                )
        if not results.get('records'):  # 检查是否存在有效记录集
            return []
        
        candidate_tables = []
        for i, result in enumerate(results['records']):
            description = result['document']
            candidate_tables.append(description)

        if len(candidate_tables) == 0:  # 如果没有找到相关表，则返回空列表
            return []
        
        prompt = f"Given the user's query: '{user_query}', and the following table descriptions:\n"
        for table_description in candidate_tables:
            prompt += f"- {table_description}\n"
        prompt += """Please identify which tables are relevant to answer the query, including both fact tables and dimension tables. 
Then, return a JSON array containing objects with 'table' and 'description' for each relevant table. 
Please ensure that:
- The 'table' names are in lower case
- Include the schema name if available, in the format 'schema.table'
- Maintain the original schema prefix if present in the table descriptions
For example: [{\"table\": \"table_name\", \"description\": \"description\"}, ...]"""
        
        response = self._call_llm(system_prompt="You are an expert in database schema understanding.",
                  user_prompt=prompt)
        
        print(f"LLM Response: {response}")
        relevant_tables = _parse_llm_response(response=response, default_value=[])
        return relevant_tables
    
    def get_table_schemas(self, table_names):
        # 将入参全部转换为小写
        table_names = [name.lower() for name in table_names] if table_names else []
        
        # 构造 SQL 查询条件：
        # 对于每个入参表名，构造条件 "'入参表名' LIKE concat(lower(a.DAT_TableName), '%')"
        where_clause = ""
        if table_names: 
            conditions = ["'{}' like concat(lower(concat(a.dat_schemaname, '.', a.dat_tablename)), '%')".format(name) for name in table_names]
            where_clause = "where " + " or ".join(conditions)
        
        # 定义查询，获取表元数据
        schema_prefix = "sm_dmp."
        query = f"""
select a.dat_schemaname, a.dat_tablename, b.datp_name, b.datp_remarks, b.datp_key, b.datp_foreignkey, 
       b.datp_type, b.datp_length, c.dict_code, c.dict_name, c.source_item
from {schema_prefix}data_asset_tableinfo a
left join {schema_prefix}data_asset_tableproinfo b on a.dat_code = b.datp_code
left join {schema_prefix}ds_dictionary c on b.datp_dictid = c.id
        {where_clause}
        """
        print("get table schemas: ", query)
        # 执行查询
        code, result = DatabaseService.execute_query(connection_config=self.meta_db_config, query=query, limit=None)
        if not code:
            return {}  # 查询失败返回空字典
        
        # 创建列名到索引的映射
        # col_map = {col: idx for idx, col in enumerate(result["columns"])}
        # print(f"Column map: {col_map}")

        # 按（schema, table）分组数据
        table_data = defaultdict(list)
        for row in result["data"]:
            schema = row['dat_schemaname']
            table = row['dat_tablename']
            full_key = f"{schema}.{table}"
            
            # 检查是否应包含此表
            # if not table_names or full_key in table_names:
            column_info = {
                'name': row['datp_name'],
                'type': row['datp_type'],
                'length': row['datp_length'],
                'comment': row['datp_remarks'],
                'dict_code': row['dict_code'],
            }
            table_data[full_key].append(column_info)
        
        # 初始化 schemas 字典
        schemas = {}
        
        # 为每个表生成 CREATE TABLE 语句
        for full_key, columns in table_data.items():
            schema, table = full_key.split('.')
            
            # 生成列定义
            col_defs = []
            for col in columns:
                col_def = f"{col['name']} {col['type']}"
                if col['type'].lower() == 'varchar' and col['length']:
                    col_def += f"({col['length']})"
                if col['comment']:
                    col_def += f" COMMENT '{col['comment']}'"
                col_defs.append(col_def)
            
            # 生成外键约束
            fk_defs = []
            for col in columns:
                if col['dict_code']:
                    parts = col['dict_code'].split('.')
                    if len(parts) == 2:
                        ref_schema, ref_table = parts
                        fk_def = f"foreign key ({col['name']}) references {ref_schema}.{ref_table}(UniCode)"
                        fk_defs.append(fk_def)
            
            # 组合列定义和外键
            all_defs = col_defs + fk_defs
            
            # 创建 CREATE TABLE 语句
            create_table = (
                f"CREATE TABLE {table} ("
                + "\n  "
                + ",\n  ".join(all_defs)
                + "\n)"
            )
            
            # 存储到 schemas 字典
            schemas[full_key] = create_table
        print(schemas)
        final_schemas = {}
        for table_name in table_names:
            for schema_key, create_table in schemas.items():
                if table_name.startswith(schema_key.lower()):
                    # 替换建表语句中的表名为 table_names 中的表名
                    schema_table = schema_key.split('.')[-1]  # 获取原始表名
                    target_table = table_name.split('.')[-1]  # 获取目标表名
                    modified_create_table = create_table.replace(
                        f"CREATE TABLE {schema_table}", 
                        f"CREATE TABLE {target_table}"
                    )
                    final_schemas[table_name] = modified_create_table
                    break  # 找到匹配就跳出内层循环
        
        return final_schemas

    def extract_filter_values(self, user_query):
        prompt = f"""
Your task is to extract filtering values from the given query, such as dates, numbers, names, categories, locations, or any other identifiers that specify a subset of data. Return the result as a JSON array containing these values, with no additional text or explanation.

### Examples:
- Query: '统计内科的收入总量'  
  Output: ["内科"]
- Query: 'total sales for Q1 2023'  
  Output: ["Q1 2023"]
- Query: '院内有多少科室'  
  Output: []

Now, for the query '{user_query}', output only the JSON array of filtering values.
"""
        values = self._call_llm(
            system_prompt="You are an expert in natural language understanding for SQL queries.",
            user_prompt=prompt
        )
        
        return values

    def get_possible_code_tables(self, filter_values, metafilter):
        """For each filter value, look up candidate code-table entries in the
        vector store.

        Returns a dict mapping each lower-cased value to a list of matching
        code-table documents (deduplicated).
        """
        possible_ct = {}

        # The values may arrive as a raw LLM response string; parse first.
        if isinstance(filter_values, str):
            filter_values = _parse_llm_response(filter_values, default_value=[])

        for value in filter_values:
            if not isinstance(value, str):
                continue

            hits = self.chroma.list_records(
                collection_name=self.code_collection_name,
                embedding_model=None,
                page=1,
                page_size=10,
                search_type='hybrid',
                record_id=None,
                query_text=value,
                metadata_filter=metafilter,
                document_filter=None,
                n_results_requested=10
            )
            records = hits.get('records')
            if not records:
                continue

            # Deduplicate documents via a set before storing.
            code_tables = {rec['document'] for rec in records}
            possible_ct[value.lower()] = list(code_tables)

        return possible_ct

    def get_similar_qa(self, user_query):
        """Return up to 10 stored QA documents similar to *user_query*."""
        hits = self.chroma.list_records(
            collection_name=self.qa_pairs_collection_name,
            embedding_model=None,
            page=1,
            page_size=10,
            search_type='hybrid',
            record_id=None,
            query_text=user_query,
            metadata_filter=None,
            document_filter=None,
            n_results_requested=10
        )

        records = hits.get('records')
        if not records:
            return []
        return [rec['document'] for rec in records]

    def generate_sql(self, user_query,
                     relevant_tables,
                     table_schemas,
                     similar_qa,
                     history_messages=None):
        """Generate a SQL query plus dimension filters for *user_query*.

        Args:
            user_query: Natural-language question.
            relevant_tables: List of {"table", "description"} dicts.
            table_schemas: Mapping of table name -> CREATE TABLE DDL text.
            similar_qa: Example {Question, SQL} documents.
            history_messages: Optional chat history as
                [{"role": ..., "content": ...}, ...].

        Returns:
            Parsed LLM output, expected shape {"sql": ..., "filters": [...]};
            {} when the response cannot be parsed.
        """
        current_date = datetime.now().strftime('%Y-%m-%d')
        # Concatenate conversation history into the prompt.
        history_prompt = ""
        if history_messages:
            for msg in history_messages:
                role = msg.get("role", "user")
                content = msg.get("content", "")
                history_prompt += f"{role.capitalize()}: {content}\n"
            history_prompt += "\n"

        # Build the user prompt.
        prompt = (
            "Based on my question and the database schema below, generate the SQL query and filters.\n\n"

            f"### Current date\n{current_date}\n\n"

            f"### Conversation History\n{history_prompt}\n\n"

            "### Relevant Tables\n"
        )
        for table in relevant_tables:
            table_name = table.get('table', '')
            prompt += (
                f"- **Table**: {table_name}\n"
                f"  - **Description**: {table.get('description', 'No description available')}\n"
                f"  - **Schema**:\n{table_schemas.get(table_name, 'Schema not available')}\n\n"
            )

        prompt += "\n### Similar Question-SQL Pairs\n"
        for qa in similar_qa:
            prompt += f"- {qa}\n"

        prompt += (
            f"\n### My Question\n\"{user_query}\"\n\n"

            "### Instructions\n"
            "1. **Identify relevant tables**: Only use fact and dimension tables explicitly provided in the schema.\n"
            "2. **Use COMMENTs**: They indicate join relationships between fact and dimension tables.\n"
            "3. **Select descriptive fields**: When filtering/grouping by code tables, use the descriptive column "
            "(e.g., `dept_name`) instead of code fields.\n"
            "4. **Filters**: Collect all dimension-table-based filters separately in the \"filters\" array. "
            "Always use the actual table name and column name, not SQL aliases. **The 'code_table' value must include the schema name (e.g., 'dwbc.d_department').**\n"
            "5. **Avoid assumptions**: Do not create joins or columns not explicitly shown in the schema or examples.\n"
            "6. **Mimic examples**: Follow the SQL structure shown in the {Question, SQL} pairs for consistency.\n"
            # Bug fix: rule 7 previously had no trailing newline, so rules 7
            # and 8 ran together on one line in the rendered prompt.
            "7. **SQL Formatting**: * **Identifiers:** All SQL identifiers (schema names, table names, column names, aliases) must be converted to **lowercase**. * **Literals:** The character case of all string literals (i.e., data values enclosed in single quotes) **must be preserved exactly** and not be changed.\n"
            "8. **Output format is strict**: Return only a single JSON object, no explanations.\n\n"

            "### Example\n"
            "Question: \"统计心血管科的总收入\"\n"
            "Output:\n"
            "```json\n"
            "{\n"
            "  \"sql\": \"SELECT SUM(t1.revenue_amount) FROM dw.f_revenue t1 JOIN dwbc.d_department t2 "
            "ON t1.dept_code = t2.dept_code WHERE t2.dept_name = '心血管科'\",\n"
            "  \"filters\": [\n"
            "    {\n"
            "      \"code_table\": \"dwbc.d_department\",\n"
            "      \"column\": \"dept_name\",\n"
            "      \"values\": [\"心血管科\"]\n"
            "    }\n"
            "  ]\n"
            "}\n"
            "```\n\n"
            "### Your Output"
        )

        # Map db_dialect to a readable dialect name for the system prompt.
        dialect_map = {
            "kylin": "Apache Kylin SQL",
            "oracle": "Oracle SQL",
            "mysql": "MySQL SQL",
            "mssql": "T-SQL (Microsoft SQL Server)",
            "postgresql": "PostgreSQL SQL",
            "iris": "InterSystems IRIS SQL"
        }
        dialect_name = dialect_map.get(self.db_dialect.lower(), self.db_dialect)
        system_prompt = f"You are an expert SQL query generator. Your task is to generate SQL for {dialect_name}."

        response = self._call_llm(
            system_prompt=system_prompt,
            user_prompt=prompt
        )

        return _parse_llm_response(response, {})
    
    def find_similar_filter_values(self, filters: List[Dict[str, Any]], score_threshold: float = 0.7) -> List[Dict[str, Any]]:
        """
        Refine filters by searching vector DB for similar values.

        Each input filter is expected to carry 'code_table', 'column' and
        'values'.  For every value, the code-table collection is searched
        (restricted to that table) and sufficiently similar documents are
        collected into 'matched_values'.  Filters missing table/column info
        are passed through unchanged.

        Args:
            filters: Filter dicts as produced by generate_sql.
            score_threshold: Minimum 'distance' field for a hit to be kept.

        Returns:
            An enhanced filters list with matched_values.
        """
        enhanced_filters = []

        for f in filters:
            column_identifier = f.get("column")
            code_table = f.get("code_table")
            values = f.get("values", [])
            matched_values = set()

            # Without both a table and a column we cannot search; keep as-is.
            if not code_table or not column_identifier:
                enhanced_filters.append(f)
                continue

            for value in values:
                if not value:
                    continue

                table_name = code_table.lower()

                # Vector search restricted to this code table.
                results = self.chroma.list_records(
                    collection_name=self.code_collection_name,
                    embedding_model=None, 
                    page=1, page_size=10, 
                    search_type='hybrid', 
                    record_id=None, 
                    query_text=value, 
                    metadata_filter={"TableName": table_name}, 
                    document_filter=None, 
                    n_results_requested=10)
                   
                
                if records := results.get('records'):
                    for rec in records:
                        # NOTE(review): this treats 'distance' as a similarity
                        # score (higher = better).  Raw Chroma distances are
                        # lower-is-better — confirm ChromaService converts to a
                        # similarity before this comparison.
                        if rec.get("distance", 0) >= score_threshold:
                            matched_values.add(rec['document'])

            enhanced_filters.append({
                "code_table": code_table,
                "column": column_identifier,
                "original_values": values,
                "matched_values": list(matched_values) or values  # keep originals when nothing matched
            })

        return enhanced_filters
    
    def rewrite_sql_with_new_filters(self, initial_sql: str, enhanced_filters: List[Dict[str, Any]]) -> str:
        """Rewrite *initial_sql* so each filter column uses its matched values.

        For every entry in *enhanced_filters*, an existing condition on the
        same column (``col = <literal>`` or ``col IN (...)``) is replaced by
        an ``IN`` clause over ``matched_values``; when the column is not yet
        filtered, the condition is appended (with ``WHERE``/``AND`` as
        appropriate).

        enhanced_filters format (as you defined):
        [
        {
            "code_table": "...",
            "column": "t2.dept_name",
            "original_values": ["心血管科"],
            "matched_values": ["心血管中心", "心脏科"]
        },
        ...
        ]
        """

        def quote_literal(val):
            # Render one value as a safe SQL literal:
            #   None -> NULL; bool -> 1/0 (checked before int: bool is an int
            #   subclass); numbers unquoted; strings quoted with ' doubled.
            if val is None:
                return "NULL"
            if isinstance(val, bool):
                return "1" if val else "0"
            if isinstance(val, (int, float)):
                return str(val)
            text = str(val).replace("'", "''")
            return f"'{text}'"

        sql = initial_sql
        for flt in enhanced_filters:
            column = flt.get("column")
            new_values = flt.get("matched_values", []) or []
            if not column or not new_values:
                continue

            literals = ", ".join(quote_literal(v) for v in new_values)
            replacement = f"{column} IN ({literals})"

            # Match occurrences like `t2.dept_name = '...'` / `t2.dept_name IN (...)`.
            condition_re = re.compile(
                rf"{re.escape(column)}\s*(?:=|IN)\s*(?:'[^']*'|\"[^\"]*\"|\([^)]*\)|\d+)",
                re.IGNORECASE,
            )

            if condition_re.search(sql):
                # Swap out the first existing condition on this column.
                sql = condition_re.sub(replacement, sql, count=1)
            elif re.search(r"\bWHERE\b", sql, re.IGNORECASE):
                sql += f" AND {replacement}"
            else:
                sql += f" WHERE {replacement}"

        return sql

    def explain_sql(self, sql: str, user_query: str, relevant_tables, table_schemas) -> str:
        """Have the LLM explain, in Chinese, the logic and intent of *sql*
        relative to the user's question and the relevant tables."""
        header = (
            "You are a SQL expert. Briefly and clearly explain only the logic and intent of the following SQL query, "
            "in relation to the user's question. Do not output any extra information or commentary. "
            "Keep your explanation within 300 words. Use Chinese for the explanation.\n\n"
            f"### User's Question:\n{user_query}\n\n"
            f"### SQL Query:\n{sql}\n\n"
            "### Relevant Tables:\n"
        )
        table_lines = "".join(
            f"- **Table**: {table.get('table', '')}\n"
            f"  - **Description**: {table.get('description', 'No description available')}\n"
            for table in relevant_tables
        )
        footer = (
            "Please provide a clear, step-by-step explanation in Chinese, "
            "so that a business user can understand the logic and intent of the SQL."
        )
        return self._call_llm(
            system_prompt="You are a SQL expert and business analyst.",
            user_prompt=header + table_lines + footer
        )

    def generate_plot_code(self, question: str = None, sql: str = None, df_metadata: str = None) -> Dict[str, Any]:
        """Ask the LLM for Plotly code visualising a query result.

        Returns {'status': 'success', 'code': ...} on success, or
        {'status': 'error', 'message': ..., 'code': None} on any failure
        (including missing df_metadata).
        """
        try:
            if not df_metadata:
                raise ValueError("缺少数据框架元数据信息")

            if question is not None:
                context = f"The following is a pandas DataFrame that contains the results of the query that answers the question the user asked: '{question}'"
            else:
                context = "The following is a pandas DataFrame "

            if sql is not None:
                context += f"\n\nThe DataFrame was produced using this query: {sql}\n\n"

            context += f"The following is information about the resulting pandas DataFrame 'df': \n{df_metadata}"

            raw_code = self.model_client.chat(
                messages=[
                    {"role": "system", "content": self.prompts['generate_plot']['system'].format(system_msg=context)},
                    {"role": "user", "content": self.prompts['generate_plot']['user']}
                ],
                model=self.model_name
            )

            if not raw_code:
                raise ValueError("生成图表代码失败")

            # Strip the markdown fence and any fig.show() call.
            cleaned = self._sanitize_plotly_code(self._extract_python_code(raw_code))
            return {'status': 'success', 'code': cleaned}

        except Exception as e:
            return {
                'status': 'error',
                'message': f"生成图表代码时发生错误: {str(e)}",
                'code': None
            }
            
    def validate_and_repair_sql(self, sql: str) -> str:
        """
        Validate and repair a generated SQL query to ensure correct syntax and formatting.

        Args:
            sql: The original SQL query

        Returns:
            str: The repaired SQL query; falls back to the input when the
                LLM yields nothing usable.
        """
        user_prompt = (
            "You are an SQL expert. Review the following query and fix any issues.\n"
            "Make sure:\n"
            "- SQL syntax is valid\n"
            "- Keywords are in uppercase\n"
            "- Table/column references are clear (with aliases if needed)\n"
            "- Formatting and indentation are clean\n\n"
            f"SQL:\n{sql}\n\n"
            "Return only the corrected SQL query, without explanations."
        )

        system_prompt = (
            "You are an expert in SQL syntax and formatting. "
            "If the query is valid, return it unchanged. "
            "Otherwise, return a corrected and well-formatted version. "
            "Output only the SQL query."
        )

        candidate = extract_sql(
            self._call_llm(system_prompt=system_prompt, user_prompt=user_prompt)
        )
        return candidate or sql
        
    def _extract_python_code(self, markdown_string: str) -> str:
        # Regex pattern to match Python code blocks
        pattern = r"```[\w\s]*python\n([\s\S]*?)```|```([\\s\S]*?)```"

        # Find all matches in the markdown string
        matches = re.findall(pattern, markdown_string, re.IGNORECASE)

        # Extract the Python code from the matches
        python_code = []
        for match in matches:
            python = match[0] if match[0] else match[1]
            python_code.append(python.strip())

        if len(python_code) == 0:
            return markdown_string

        return python_code[0]

    def _sanitize_plotly_code(self, raw_plotly_code: str) -> str:
        # Remove the fig.show() statement from the plotly code
        plotly_code = raw_plotly_code.replace("fig.show()", "")

        return plotly_code

    def get_plotly_figure(self, plotly_code: str, df: pd.DataFrame, dark_mode: bool = True
        ) -> plotly.graph_objs.Figure:
            """
            **Example:**
            ```python
            fig = vn.get_plotly_figure(
                plotly_code="fig = px.bar(df, x='name', y='salary')",
                df=df
            )
            fig.show()
            ```
            Get a Plotly figure from a dataframe and Plotly code.

            When executing the generated code raises, falls back to a
            heuristic chart chosen from the dataframe's dtypes
            (indicator / scatter / bar / pie / line).

            Args:
                df (pd.DataFrame): The dataframe to use.
                plotly_code (str): The Plotly code to use.
                dark_mode (bool): Apply the "plotly_dark" template when True.

            Returns:
                plotly.graph_objs.Figure: The Plotly figure, or None when the
                executed code defines no `fig`.
            """
            # NOTE(review): exec() runs LLM-generated code with this module's
            # globals — arbitrary code execution; confirm the code source is
            # trusted or sandbox this call.
            ldict = {"df": df, "px": px, "go": go}
            try:
                exec(plotly_code, globals(), ldict)

                fig = ldict.get("fig", None)
            except Exception as e:
                # If df holds a single row, render it as an indicator figure.
                if len(df) == 1:
                    numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
                    if numeric_cols:
                        value = df[numeric_cols[0]].iloc[0]
                        title_text = numeric_cols[0]
                    else:
                        # Try to coerce the first (non-numeric) cell to a number.
                        try:
                            value = pd.to_numeric(df.iloc[0, 0])
                            title_text = df.columns[0]
                        except (ValueError, TypeError):
                            # Coercion failed: fall back to a default bar chart.
                            return px.bar(df)
                    fig = go.Figure(go.Indicator(
                        mode="number",
                        value=value,
                        title={"text": title_text},
                        number={"valueformat": ".0f"}  # controls the displayed number format
                    ))
                else:
                    # Inspect data types
                    numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
                    categorical_cols = df.select_dtypes(
                        include=["object", "category"]
                    ).columns.tolist()

                    # Decision-making for plot type
                    if len(numeric_cols) >= 2:
                        # Use the first two numeric columns for a scatter plot
                        fig = px.scatter(df, x=numeric_cols[0], y=numeric_cols[1])
                    elif len(numeric_cols) == 1 and len(categorical_cols) >= 1:
                        # Use a bar plot if there's one numeric and one categorical column
                        fig = px.bar(df, x=categorical_cols[0], y=numeric_cols[0])
                    elif len(categorical_cols) >= 1 and df[categorical_cols[0]].nunique() < 10:
                        # Use a pie chart for categorical data with fewer unique values
                        fig = px.pie(df, names=categorical_cols[0])
                    else:
                        # Default to a simple line plot if above conditions are not met
                        fig = px.line(df)

            if fig is None:
                return None

            if dark_mode:
                fig.update_layout(template="plotly_dark")

            return fig

    def get_code_table_matches(self, filters):
        """
        Match code-table values from the vector store for each filter.

        Each code_table keeps at most its top 20 hits across all of its
        values, sorted by score (descending).

        filters example:
        [
        {"code_table": "tableA", "values": ["valueA", "valueB"]},
        {"code_table": "tableB", "values": ["valueX"]}
        ]

        Returns:
            dict: code_table -> list of hit dicts with value/document/
            metadata/score.
        """
        results_by_table = {}

        for f in filters:
            code_table = f.get("code_table")
            values = f.get("values", [])
            # Skip incomplete filter entries.
            if not code_table or not values:
                continue

            all_results = []
            for val in values:
                # NOTE(review): this searches the qa_pairs collection even
                # though the docstring talks about code-table values; the
                # other code-table lookups in this class use
                # self.code_collection_name and a "TableName" metadata key —
                # confirm the collection name and filter key here.
                records = self.chroma.list_records(
                    collection_name=self.qa_pairs_collection_name,
                    embedding_model=None,
                    page=1,
                    page_size=10,
                    search_type='hybrid',
                    record_id=None,
                    query_text=val,
                    metadata_filter={"code_table": code_table},
                    document_filter=None,
                    n_results_requested=10
                )

                for rec in records.get("records", []):
                    # NOTE(review): reads a "score" field (defaulting to 0);
                    # find_similar_filter_values reads "distance" instead —
                    # confirm which key ChromaService actually returns.
                    score = rec.get("score", 0)  # similarity score of the hit
                    all_results.append({
                        "code_table": code_table,
                        "value": val,
                        "document": rec.get("document"),
                        "metadata": rec.get("metadata"),
                        "score": score
                    })

            # Sort by score (descending) and keep the top 20 per table.
            all_results.sort(key=lambda x: x.get("score", 0), reverse=True)
            results_by_table[code_table] = all_results[:20]

        return results_by_table
    
    def rewrite_sql_with_and_privileges(self, sql: str, privilege_context: dict):
        """
        Rewrite SQL query using code table matches and optional privilege validation.

        Args:
            sql (str): The original SQL generated by LLM.
            privilege_context (dict): User privileges with table/column/row-level rules.

        Returns:
            (rewritten_sql, error_msg)
        """
        try:
            # Privilege context formatting
            privilege_str = json.dumps(privilege_context, indent=2, ensure_ascii=False)
            print("privilege_str: ", privilege_str)

            # ===== Prompt parts =====
            user_prompt = f"""
    #### Original Query:
    {sql}
    
    #### Privilege Context:
    {privilege_str}

    /no_think
    """

            system_prompt = (
"You are an expert and highly secure SQL rewriting system. Your sole purpose is to rewrite a user's SQL query "
"to strictly enforce access controls defined in a provided 'privilege_context'. You must follow all rules "
"meticulously and without exception.\n"
"\n"
"**Core Rules & Logic Flow:**\n"
"\n"
" 1.  **Strict Privilege Validation (First Priority):**"
"     You must validate all tables and columns from the `Original Query` against the `privilege_context` before any other processing. The `privilege_context` is a JSON array of objects, where each object contains the table's privilege information, including the table name under the key `\"table_name\"`."
""
"     * **Table Validation:** For every table in the `Original Query` (`FROM` and `JOIN` clauses), verify that its full name (including schema if present) exists as the value of the `\"table_name\"` field in *at least one* object within the `privilege_context` array. You must be able to locate the corresponding privilege object for every table used."
""
"     * **Column Validation:** For every column referenced in the query (`SELECT`, `WHERE`, `ON`, `GROUP BY`, etc.):"
"         a. Identify which table the column belongs to (e.g., `h.ApplyLocDR` belongs to the table aliased as `h`)."
"         b. Locate that table's privilege object in the `privilege_context` array by matching the table's full name to the object's `\"table_name\"` value."
"         c. Verify that the column name exists in the table's `allowed_columns` array."
"             A column is valid only if it appears as `\"name\": \"<column_name>\"` within one of the `allowed_columns` objects."
""
"     * If any table or column is not found, immediately return:"
"       error: no privilege to run the query"
"\n"
"2.  **Row-Filter Conflict Detection (Second Priority):**\n"
"    * Analyze the `WHERE` clause of the `Original Query`.\n"
"    * For each `row_filter` defined in the `privilege_context`, check if the query filters on the same field.\n"
"    * A conflict exists if the user’s filter values have zero overlap with the allowed `value` list in the `row_filter`.\n"
"\n"
"    * **Example Conflict:**\n"
"      The user query has `WHERE d.CTD_DESC IN ('肾内科')` but the privilege `row_filter` mandates "
"`d.CTD_DESC IN ('心内科', '心外科')`. Since there is no overlap, this is a conflict.\n"
"\n"
"    * If a conflict is detected, stop processing and output exactly:\n"
"      error: no privilege to run the query\n"
"\n"
"3.  **SQL Rewriting (Final Step):**\n"
"    If the query passes the first two checks, rewrite it according to these rules:\n"
"\n"
"    * **Control Table Usage:** Only tables that appear in the `Original Query` may be used in the rewritten query, **UNLESS** a mandatory `row_filter` (due to `related_code_table` logic) requires a new `JOIN`. If a new join is required for a `row_filter`, it must be added."
"\n"
"    * **Apply Mandatory Row-Level Filters:**\n"
"      Inject all `row_filters` from the `privilege_context` into the query’s `WHERE` clause. "
"Combine them with any existing user conditions using the `AND` operator.\n"
"\n"
"    * **Enforce Alias Usage:**\n"
"      Ensure that every column reference in the `SELECT`, `WHERE`, `GROUP BY`, `HAVING`, and `ORDER BY` clauses "
"is prefixed with its table alias (e.g., `h.HMDisDate`, `d.CTD_DESC`).\n"
"\n"
"    * **Reuse Existing Joins:**\n"
"      When applying a filter that requires a join (due to a `related_code_table`), first check if the `Original Query` "
"already joins to that table. If it does, you must reuse the existing table's alias for the filter condition. "
"Do NOT add a redundant join (e.g., do not add `JOIN CT_Dept d2` if `JOIN CT_Dept d` already exists).\n"
"\n"
"    * **Handle Normalized Filters (related_code_table logic):**\n"
"      Some `row_filters` specify a `related_code_table` that maps code fields to human-readable descriptions. "
"In such cases:\n"
"      - Join the related table using the following pattern:\n"
"        LEFT JOIN {related_code_table} {alias} ON {main_table_alias}.{field} = {alias}.{code_field}\n"
"      - Apply the filter condition on the description field (`desc_field`) of that joined table:\n"
"        WHERE {alias}.{desc_field} IN (<list of allowed values>)\n"
"      - If the `Original Query` already includes this join, reuse the alias rather than creating a duplicate.\n"
"\n"
"      Example:\n"
"        SELECT *\n"
"        FROM dw_view.hqmsantcvreporttrans a\n"
"        LEFT JOIN CT_Dept d ON a.ApplyLocDR = d.CTD_Code\n"
"        WHERE d.CTD_Desc IN ('心内一科', '心外科');\n"
"\n"
"**Output Format:**\n"
"* Return ONLY the final, valid, syntactically correct SQL query.\n"
"* Do not include explanations, comments, or markdown formatting.\n"
"* If validation fails, return exactly: error: no privilege to run the query\n"
)
            response = self.model_client.chat(
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                model=self.model_name,
                temperature=0
            )

            rewritten_sql = extract_sql(response)
            print("rewritten_sql: ", rewritten_sql)

            if rewritten_sql.startswith("error"):
                return '', rewritten_sql

            return rewritten_sql, ''

        except Exception as e:
            error_msg = f"Error during SQL rewriting: {str(e)}"
            print(error_msg)
            return '', error_msg
    
    def rewrite_sql_with_filters_and_privileges(self,
                                            sql: str,
                                            code_table_matches: dict,
                                            privilege_context: dict,
                                            check_privileges: bool = True):
        """
        Rewrite SQL query using code table matches and optional privilege validation.

        The original SQL, the code-table candidate values, and (optionally)
        the privilege context are formatted into a prompt and sent to the chat
        model, which normalizes filter values and enforces privilege rules.

        Args:
            sql (str): The original SQL generated by LLM.
            code_table_matches (dict): Code table values matched from vector DB.
                Example: {"tableA": ["value1", "value2"], "tableB": ["abc", "def"]}
            privilege_context (dict): User privileges with table/column/row-level rules.
            check_privileges (bool): Whether to enforce privilege validation in rewriting.

        Returns:
            (rewritten_sql, error_msg): exactly one of the two is non-empty.
        """
        try:
            # Format code table matches as readable JSON for the prompt.
            code_table_str = json.dumps(code_table_matches, indent=2, ensure_ascii=False)

            # Privilege context formatting
            privilege_str = json.dumps(privilege_context, indent=2, ensure_ascii=False)

            # ===== Prompt parts =====
            base_prompt = f"""
    The original SQL query is:
    {sql}

    The following are candidate code table values that should be normalized and used when appropriate:
    {code_table_str}
    """

            privilege_prompt = ""
            if check_privileges:
                privilege_prompt = f"""
    Additionally, below is the privilege context that defines the allowed access for each table:
    {privilege_str}

    You must ensure:
    1. Only use tables listed in the privilege context. If a table is missing, return "error: no privilege to run the query".
    2. Only select columns the user has access to.
    3. **Mandatory Table Alias Usage:** Every column reference in the SELECT, WHERE, GROUP BY, and ORDER BY clauses **must** be prefixed with its corresponding table alias (e.g., `h.HMDisDate`, `d.CTD_DESC`).
    4. **Apply Mandatory Row-Level Filters:** All row-level filters defined in privileges must be applied as mandatory WHERE conditions.
      - Combine them with the user's original WHERE clause using **AND**.
      - **CRITICAL JOIN REUSE:** Before adding a new JOIN for a row-filter, check if the required table is already joined. If it is, **reuse the existing table alias** for the filter condition. Do NOT add redundant JOINs (like d2).
      - **Normalization:** If a filter has a `related_code_table`, the condition **MUST** be applied on the **description field** of the joined table using the `IN` operator and the full list of `value` strings.
    5. Ensure proper SQL syntax and executable query.
    6. Quote all string literals correctly (e.g., 'value').
    7. Avoid using SQL keywords as column aliases.
    """

            final_prompt = f"""{base_prompt}
    {privilege_prompt}

    Rewrite the SQL query so that:
    - Normalize filter values using the provided code table values, and always apply them through a JOIN on the description field.
    - **Do not introduce new table aliases for tables that are already joined.**
    - Return ONLY the final SQL query, without explanations, comments, or markdown formatting.

    /no_think
    """
            # NOTE: the original concatenation had no "\n" separators, so the
            # numbered rules ran together ("...otherwise):1. Only use...").
            # Newlines added so the model sees a proper rule list.
            system_prompt = ("You are a helpful and strict SQL Rewriting Expert that enforces all security and normalization rules, particularly mandatory row-level filters.\n"
                "Rules (must always follow, even if user asks otherwise):\n"
                "1. Only use tables listed in the privilege context.\n"
                "2. If the user query references a table not in the privilege context, output exactly: error: no privilege to run the query.\n"
                "3. Never attempt to guess or create table names. If unsure, return the error message.")
            response = self.model_client.chat(
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": final_prompt}
                ],
                model=self.model_name,
                temperature=0
            )

            rewritten_sql = extract_sql(response)
            print("rewritten_sql: ", rewritten_sql)

            # Guard: empty/None extraction is an error, not an empty success.
            if not rewritten_sql:
                return '', 'error: failed to extract SQL from model response'

            if rewritten_sql.startswith("error"):
                return '', rewritten_sql

            return rewritten_sql, ''

        except Exception as e:
            error_msg = f"Error during SQL rewriting: {str(e)}"
            print(error_msg)
            return '', error_msg

    async def text2sql_stream(self, user_query, user_id: str = None, dark_mode: bool = False, permission_check: bool = True, history_messages: List[Dict[str, Any]] = []):
        """
        Stream the text-to-SQL conversion pipeline, yielding progress updates.

        Pipeline stages: find relevant tables -> fetch table schemas ->
        look up similar Q&A -> generate SQL -> match code tables ->
        (privilege check and) SQL rewrite -> execute SQL -> explain SQL.

        Args:
            user_query: the user's natural-language query text.
            user_id: user id for privilege lookup; required when
                permission_check is True, otherwise unused.
            dark_mode: display hint; not referenced inside this method.
            permission_check: when True, fetch the user's privileges and
                rewrite the SQL to enforce them before execution.
            history_messages: prior conversation turns forwarded to SQL
                generation. NOTE(review): mutable default argument — shared
                across calls; safe only while never mutated. Confirm, or
                switch to None with a local default.

        Yields:
            dict: processing status, step progress, and intermediate/final
            results (SQL, data, explanation).
        """
        from decimal import Decimal
        import traceback

        # Build the step list dynamically (a SQL-rewrite step is included even
        # when privileges are not being checked).
        step_names = [
            '获取相关表',
            '获取表结构',
            '获取相似问答',
            '生成SQL',
            '匹配码表'
        ]
        if permission_check:
            step_names.extend(['权限验证和SQL重写', '执行SQL查询'])
        else:
            step_names.extend(['SQL重写', '执行SQL查询'])
        # Append the SQL-explanation step
        step_names.append('SQL解释')

        # Shared result template; each yield spreads this dict and overrides
        # message/think/split for the current stage.
        result = {
            'status': 'processing',
            'message': '',
            'think': '',
            'step': 0,
            'step_names': step_names,
            'step_count': len(step_names),
            'sql': None,
            'details': {},
            'data': None,
            'plot_code': None,
            'figure': None,
            "type": "text2sql"
        }

        try:
            # 1. Find relevant tables
            result['step'] = 1
            yield {**result, 'message': '正在分析相关表...', 'think': '正在分析查询需要用到的数据表...\n', 'split': True}
            
            relevant_tables = self.get_relevant_tables(user_query)
            result['details']['relevant_tables'] = relevant_tables
            table_names = [table['table'] for table in relevant_tables]
            yield {**result, 'message': '相关表分析完成', 'think': f"找到相关表：{', '.join(table_names)}\n", 'split': True}
            
            # 2. Fetch table schemas
            result['step'] = 2
            yield {**result, 'message': '正在获取表结构...', 'think': '正在读取表的详细结构信息...\n', 'split': True}
            
            table_schemas = self.get_table_schemas(table_names)
            result['details']['table_schemas'] = table_schemas
            yield {**result, 'message': '表结构获取完成', 'think': f"成功获取 {len(table_schemas)} 个表的结构信息\n", 'split': True}

            # 3. Retrieve similar Q&A pairs
            result['step'] = 3
            yield {**result, 'message': '正在查找相似问题...', 'think': '正在搜索历史相似查询...\n', 'split': True}
            
            similar_qa = self.get_similar_qa(user_query)
            result['details']['similar_qa'] = similar_qa
            yield {**result, 'message': '相似问题查找完成', 'think': f"找到 {len(similar_qa)} 个相似问答\n", 'split': True}

            # 4. Generate SQL
            result['step'] = 4
            yield {**result, 'message': '正在生成SQL...', 'think': '正在基于收集的信息生成SQL查询...\n', 'split': True}
            
            llm_response = self.generate_sql(user_query, relevant_tables, table_schemas, similar_qa, history_messages=history_messages)
            if not llm_response.get("sql"):
                result['status'] = 'error'
                result['message'] = 'SQL生成失败'
                result['think'] = '无法生成有效的SQL查询'
                yield result
                return
            
            sql = llm_response.get("sql")
            filters = llm_response.get("filters")
            result['llm_response'] = llm_response
            yield {**result, 'message': 'SQL生成完成', 'think': f"成功生成SQL查询：\n{llm_response.get('sql')}\n", 'split': True}

            # 5. Match code tables
            result['step'] = 5
            yield {**result, 'message': '正在匹配码表...', 'think': '正在查找相关的码表信息...\n', 'split': True}
            
            # code_tables = self.get_code_table_matches(llm_response.get("filters"))
            # result['code_table_matches'] = code_tables
            # yield {**result, 'message': '码表匹配完成', 'think': f"完成码表匹配分析：{code_tables}\n", 'split': True}
            enhanced_filters = self.find_similar_filter_values(filters)
            yield {**result, 'message': '码表匹配完成', 'think': f"完成码表匹配：{enhanced_filters}\n", 'split': True}

            # Privilege validation + SQL rewrite, then SQL execution
            privilege_context = None
            if permission_check:
                # 6. Privilege validation and SQL rewrite
                result['step'] = 6
                yield {**result, 'message': '正在进行权限验证...', 'think': '检查SQL权限并进行必要的重写...\n', 'split': True}
                
                if user_id:
                    privilege_context, error = self.auth_service.get_table_privileges([], user_id)
                    if error:
                        result['status'] = 'error'
                        result['message'] = f'获取权限失败: {error}'
                        result['think'] = '权限验证未通过'
                        yield result
                        return
                else:
                    # Cannot check privileges without a user id — abort early.
                    result['status'] = 'error'
                    result['message'] = '未提供用户ID'
                    result['think'] = '权限验证失败，未提供用户ID'
                    yield result
                    return

                # rewritten_sql, err = self.rewrite_sql_with_filters_and_privileges(sql, code_tables, privilege_context, permission_check)
                # privilege_context_md = permissions_to_markdown(privilege_context)
                print("privilege_context: ", privilege_context)
                rewritten_sql, err = self.rewrite_sql_with_and_privileges(sql, privilege_context)
                if err:
                    result['status'] = 'error'
                    result['message'] = f'权限验证未通过'
                    result['think'] = '权限验证未通过'
                    yield result
                    return
                
                sql = rewritten_sql
                result['sql'] = sql
                yield {**result, 'message': 'SQL重写完成', 'think': f"权限验证通过，重写后的SQL：\n{rewritten_sql}\n", 'split': True}

                # 7. Execute the SQL query
                result['step'] = 7
                yield {**result, 'message': '正在执行SQL查询...', 'think': '正在从数据库获取数据...\n', 'split': True}
            else:
                # 6. SQL rewrite (without privilege validation)
                result['step'] = 6
                yield {**result, 'message': '正在重写SQL...', 'think': '进行必要的SQL重写...\n', 'split': True}

                # rewritten_sql, err = self.rewrite_sql_with_filters_and_privileges(sql, code_tables, privilege_context, permission_check)
                # if err:
                #     result['status'] = 'error'
                #     result['message'] = f'SQL重写失败: {err}'
                #     result['think'] = 'SQL重写失败'
                #     yield result
                #     return
                # sql = rewritten_sql
                sql = self.rewrite_sql_with_new_filters(sql, enhanced_filters)
                result['sql'] = sql
                yield {**result, 'message': 'SQL重写完成', 'think': f"重写后的SQL：\n{sql}\n", 'split': True}

                # 7. Execute the SQL query (no privilege validation)
                result['step'] = 7
                yield {**result, 'message': '正在执行SQL查询...', 'think': '正在从数据库获取数据...\n', 'split': True}

            # Execute the SQL
            db_config = self.kylin_config  # or choose dynamically per business needs
            df = DatabaseService.run_sql_auto(sql, db_config=db_config)
            if df is None or df.empty:
                result['status'] = 'error'
                result['message'] = '查询未返回数据'
                result['think'] = '数据查询结果为空'
                yield result
                return
            
            # Replace NaN values in the DataFrame with None
            df = df.replace({float('nan'): None})
            # Convert Decimal values in the DataFrame to float
            # NOTE(review): DataFrame.applymap is deprecated in pandas 2.1+
            # (DataFrame.map is the replacement) — left unchanged here.
            df = df.applymap(lambda x: float(x) if (Decimal is not None and isinstance(x, Decimal)) else x)
            result['data'] = df
            yield {**result, 'message': 'SQL查询完成', 'think': f"成功获取 {len(df)} 条数据记录\n", 'split': True}

            # SQL explanation (always the final step)
            result['step'] = len(step_names)
            yield {**result, 'message': '正在解释SQL...', 'think': '正在分析SQL的业务含义和逻辑...\n', 'split': True}
            sql_explanation = self.explain_sql(sql, user_query, relevant_tables, table_schemas)
            result['sql_explanation'] = sql_explanation
            yield {**result, 'message': 'SQL解释完成', 'think': 'SQL已详细解释，便于业务理解', 'split': True}

            result['status'] = 'success'
            yield {**result, 'split': False}

        except Exception as e:
            # Report failure with the step number and a full traceback so the
            # frontend can surface where the pipeline broke.
            error_msg = f"步骤 {result.get('step', 0)} 处理失败: {str(e)}\n{traceback.format_exc()}"
            yield {**result, 'status': 'error', 'message': error_msg, 'think': '处理过程中发生错误'}

def permissions_to_markdown(data):
    """Render a list of table-privilege dicts as a Markdown document.

    Each entry yields a table heading, an optional column-permission table
    built from ``allowed_columns``, and an optional row-filter table built
    from ``row_filters``. Missing values render as empty cells.
    """
    out = []

    for entry in data:
        out.append(f"### 表：`{entry.get('table_name')}`\n")

        # Column-level permissions
        columns = entry.get("allowed_columns") or []
        if columns:
            out.append("**字段权限**\n")
            out.append("| 字段名 | 字段说明 |")
            out.append("|---------|-----------|")
            out.extend(
                f"| {col.get('name') or ''} | {col.get('description') or ''} |"
                for col in columns
            )
            out.append("")  # blank line after the table

        # Row-level filter conditions
        filters = entry.get("row_filters")
        if filters:
            out.append("**行过滤条件**\n")
            out.append("| 字段 | 操作符 | 值 | 关联表 | 代码字段 | 描述字段 |")
            out.append("|------|---------|----|---------|-----------|-----------|")
            keys = ("field", "operator", "value",
                    "related_code_table", "code_field", "desc_field")
            for flt in filters:
                cells = [flt.get(k) or "" for k in keys]
                out.append("| " + " | ".join(str(c) for c in cells) + " |")
            out.append("")

    return "\n".join(out)