﻿# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from __future__ import annotations

import os
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import time
from typing import Dict, Optional, Any
from datetime import datetime
import json
import hashlib
from pydantic import BaseModel
import logging
from typing import Dict, List, Optional, Any, Set
from dataclasses import dataclass
from datetime import datetime
import logging
from sqlalchemy import inspect
from sqlalchemy.engine import Engine
from sqlalchemy.sql import text
import sqlparse
from sqlparse.sql import Token, TokenList, Where, Comparison, Identifier
from sqlparse.tokens import Keyword, DML, Punctuation

logger = logging.getLogger(__name__)

from langchain.agents import create_react_agent
from langchain.agents.agent import AgentExecutor, RunnableAgent
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl import prompt as react_prompt
from langchain.chains.llm import LLMChain
from langchain_community.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
from langchain_community.tools.sql_database.tool import InfoSQLDatabaseTool, ListSQLDatabaseTool
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_core.callbacks import AsyncCallbackManagerForToolRun, BaseCallbackManager, CallbackManagerForToolRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from langchain_core.tools import BaseTool
from langchain_huggingface import HuggingFaceEndpoint
from sqlalchemy.engine import Result

from comps import CustomLogger

# Default text-generation sampling parameters for the TGI endpoint.
generation_params = {
    "max_new_tokens": 1024,
    "top_k": 10,
    "top_p": 0.95,
    "temperature": 0.01,
    "repetition_penalty": 1.03,
    "streaming": True,
}


# URL of the TGI text-generation service; None when the env var is unset,
# NOTE(review): if unset, the HuggingFaceEndpoint construction below fails at
# import time — consider failing fast with a clearer message.
TGI_LLM_ENDPOINT = os.environ.get("TGI_LLM_ENDPOINT")

# Module-level LLM client shared by the agent and the query-checker tool.
llm = HuggingFaceEndpoint(
    endpoint_url=TGI_LLM_ENDPOINT,
    task="text-generation",
    **generation_params,
)

# Keyword args for SQLDatabase.from_uri: caps stringified result length.
sql_params = {
    "max_string_length": 3600,
}

# NOTE(review): this rebinds the module-level `logger` created earlier with
# logging.getLogger(__name__) — confirm the shadowing is intentional.
logger = CustomLogger("comps-texttosql")
# LOGFLAG comes back as a string when set; any non-empty value is truthy.
logflag = os.getenv("LOGFLAG", False)

# https://github.com/langchain-ai/langchain/issues/23585

class BaseSQLDatabaseTool(BaseModel):
    """Base tool for interacting with a SQL database.

    Holds the shared database handle; ``exclude=True`` keeps the live
    connection object out of the tool's serialized representation.
    """

    db: SQLDatabase = Field(exclude=True)

    class Config(BaseTool.Config):
        pass


class _QuerySQLDataBaseToolInput(BaseModel):
    """Input schema for the sql_db_query tool."""

    query: str = Field(..., description="A detailed and correct SQL query.")


class CustomQuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool):
    """Tool for querying a SQL database.

    Runs the given SQL against the configured database and returns the raw
    result, or the driver's error message, without raising.
    """

    name: str = "sql_db_query"
    # Fixed the doubled period at the end of the first sentence; this text is
    # shown to the LLM as the tool description.
    description: str = """
    Execute a SQL query against the database and get back the result.
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """
    args_schema: Type[BaseModel] = _QuerySQLDataBaseToolInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[str, Sequence[Dict[str, Any]], Result]:
        """Execute the query, return the results or an error message.

        The ReAct agent sometimes leaks its "\nObservation" stop token into
        the tool input, so it is stripped before execution (see the upstream
        issue referenced near the top of this module).
        """
        logger.info("query: {}".format(query))
        query = query.replace("\nObservation", "")
        result = self.db.run_no_throw(query)
        return result


class _InfoSQLDatabaseToolInput(BaseModel):
    """Input schema for the sql_db_schema tool."""

    table_names: str = Field(
        ...,
        description=(
            "A comma-separated list of the table names for which to return the schema. "
            "Example input: 'table1, table2, table3'"
        ),
    )


class CustomInfoSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
    """Tool for getting metadata about a SQL database."""

    name: str = "sql_db_schema"
    description: str = "Get the schema and sample rows for the specified SQL tables."
    args_schema: Type[BaseModel] = _InfoSQLDatabaseToolInput

    def _run(
        self,
        table_names: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return schema plus sample rows for a comma-separated table list."""
        # Strip the agent's leaked "\nObservation" stop token before parsing.
        cleaned = table_names.replace("\nObservation", "")
        requested = [name.strip() for name in cleaned.split(",")]
        return self.db.get_table_info_no_throw(requested)


class _ListSQLDataBaseToolInput(BaseModel):
    """Input schema for the sql_db_list_tables tool (input is ignored)."""

    tool_input: str = Field("", description="An empty string")


class CustomListSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):
    """Tool that lists the usable table names in the database."""

    name: str = "sql_db_list_tables"
    description: str = "Input is an empty string, output is a comma-separated list of tables in the database."
    args_schema: Type[BaseModel] = _ListSQLDataBaseToolInput

    def _run(
        self,
        tool_input: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the usable table names, comma-separated."""
        names = self.db.get_usable_table_names()
        return ", ".join(names)


class _QuerySQLCheckerToolInput(BaseModel):
    """Input schema for the sql_db_query_checker tool."""

    # Fixed the garbled field description ("A detailed and SQL query"); this
    # text is surfaced to the LLM as the argument documentation.
    query: str = Field(..., description="A detailed and correct SQL query to be checked.")


class CustomQuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool):
    """Use an LLM to check if a query is correct.

    Adapted from https://www.patterns.app/blog/2023/01/18/crunchbot-sql-analyst-gpt/
    """

    # Prompt template used to ask the LLM to validate a query.
    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    # Built by the root validator below rather than passed by callers.
    llm_chain: Any = Field(init=False)
    name: str = "sql_db_query_checker"
    description: str = """
    Use this tool to double check if your query is correct before executing it.
    Always use this tool before executing a query with sql_db_query!
    """
    args_schema: Type[BaseModel] = _QuerySQLCheckerToolInput

    @root_validator(pre=True)
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Initializes the LLM chain if it does not exist in the given values dictionary."""

        # pre=True: runs before field validation so llm_chain is always set.
        if "llm_chain" not in values:
            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),  # type: ignore[arg-type]
                prompt=PromptTemplate(template=QUERY_CHECKER, input_variables=["dialect", "query"]),
            )

        # Exact, order-sensitive comparison of the prompt's input variables.
        if values["llm_chain"].prompt.input_variables != ["dialect", "query"]:
            raise ValueError("LLM chain for QueryCheckerTool must have input variables ['query', 'dialect']")

        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the LLM to check the query."""
        return self.llm_chain.predict(
            query=query,
            dialect=self.db.dialect,
            callbacks=run_manager.get_child() if run_manager else None,
        )

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Async variant of _run."""
        return await self.llm_chain.apredict(
            query=query,
            dialect=self.db.dialect,
            callbacks=run_manager.get_child() if run_manager else None,
        )


class CustomSQLDatabaseToolkit(SQLDatabaseToolkit):
    """Provides functionality to manage and manipulate SQL databases in customized way."""

    def get_tools(self) -> List[BaseTool]:
        """Assemble the customized SQL tools.

        Each tool description references the names of the other tools it
        should be used with, so construction order matters.
        """
        list_tool = CustomListSQLDatabaseTool(db=self.db)
        info_tool = CustomInfoSQLDatabaseTool(
            db=self.db,
            description=(
                "Input to this tool is a comma-separated list of tables, output is the "
                "schema and sample rows for those tables. "
                "Be sure that the tables actually exist by calling "
                f"{list_tool.name} first! "
                "Example Input: table1, table2, table3"
            ),
        )
        query_tool = CustomQuerySQLDataBaseTool(
            db=self.db,
            description=(
                "Input to this tool is a detailed and correct SQL query, output is a "
                "result from the database. If the query is not correct, an error message "
                "will be returned. If an error is returned, rewrite the query, check the "
                "query, and try again. If you encounter an issue with Unknown column "
                f"'xxxx' in 'field list', use {info_tool.name} "
                "to query the correct table fields."
            ),
        )
        checker_tool = CustomQuerySQLCheckerTool(
            db=self.db,
            llm=self.llm,
            description=(
                "Use this tool to double check if your query is correct before executing "
                "it. Always use this tool before executing a query with "
                f"{query_tool.name}!"
            ),
        )
        return [query_tool, info_tool, list_tool, checker_tool]


def custom_create_sql_agent(
    llm: BaseLanguageModel,
    toolkit: Optional[SQLDatabaseToolkit] = None,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: Optional[str] = None,
    suffix: Optional[str] = None,
    format_instructions: Optional[str] = None,
    top_k: int = 3,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    *,
    db: Optional[SQLDatabase] = None,
    prompt: Optional[BasePromptTemplate] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Create a ReAct-style SQL agent executor.

    Args:
        llm: Language model that drives the agent.
        toolkit: SQL toolkit supplying the tools. When omitted, a default
            SQLDatabaseToolkit is built from ``db``.
        callback_manager: Optional callback manager for the executor.
        prefix / suffix / format_instructions: Prompt fragments used when no
            explicit ``prompt`` is supplied.
        top_k: Row limit hint interpolated into the prompt.
        max_iterations / max_execution_time / early_stopping_method / verbose:
            Passed through to the AgentExecutor.
        agent_executor_kwargs: Extra keyword arguments for the AgentExecutor.
        db: Raw database handle, used only when ``toolkit`` is not given.
        prompt: Fully built prompt; overrides prefix/suffix/format_instructions.

    Returns:
        A configured AgentExecutor with parsing-error handling enabled.

    Raises:
        ValueError: If neither ``toolkit`` nor ``db`` is provided.
    """
    # Previously ``toolkit=None`` crashed with AttributeError at get_tools()
    # and the ``db`` keyword was silently ignored; mirror upstream langchain
    # behavior instead.
    if toolkit is None:
        if db is None:
            raise ValueError("Must provide either a 'toolkit' or a 'db' to create a SQL agent.")
        toolkit = SQLDatabaseToolkit(llm=llm, db=db)

    tools = toolkit.get_tools()
    if prompt is None:
        prefix = prefix or SQL_PREFIX
        prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
    else:
        if "top_k" in prompt.input_variables:
            prompt = prompt.partial(top_k=str(top_k))
        if "dialect" in prompt.input_variables:
            prompt = prompt.partial(dialect=toolkit.dialect)
        if any(key in prompt.input_variables for key in ["table_info", "table_names"]):
            db_context = toolkit.get_context()
            # When the prompt already embeds schema context, drop the tools
            # that would redundantly fetch it at run time.
            if "table_info" in prompt.input_variables:
                prompt = prompt.partial(table_info=db_context["table_info"])
                tools = [tool for tool in tools if not isinstance(tool, InfoSQLDatabaseTool)]
            if "table_names" in prompt.input_variables:
                prompt = prompt.partial(table_names=db_context["table_names"])
                tools = [tool for tool in tools if not isinstance(tool, ListSQLDatabaseTool)]

    if prompt is None:
        format_instructions = format_instructions or react_prompt.FORMAT_INSTRUCTIONS
        template = "\n\n".join(
            [
                prefix,
                "{tools}",
                format_instructions,
                suffix or SQL_SUFFIX,
            ]
        )
        prompt = PromptTemplate.from_template(template)

    agent = RunnableAgent(
        runnable=create_react_agent(llm, tools, prompt),
        input_keys_arg=["input"],
        return_keys_arg=["output"],
        **kwargs,
    )

    return AgentExecutor(
        name="SQL Agent Executor",
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        handle_parsing_errors=True,
        **(agent_executor_kwargs or {}),
    )


def execute(input, url):
    """Execute a SQL query using the custom SQL agent.

    Args:
        input (str): The user's input.
        url (str): The URL of the database to connect to.

    Returns:
        dict: The agent result; the first SQL statement the agent executed is
        stored under the "sql" key (empty string when the agent answered
        without running sql_db_query).
    """
    db = SQLDatabase.from_uri(url, **sql_params)
    logger.info("Starting Agent")
    agent_executor = custom_create_sql_agent(
        llm=llm,
        verbose=True,
        toolkit=CustomSQLDatabaseToolkit(llm=llm, db=db),
        agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        agent_executor_kwargs={"return_intermediate_steps": True},
    )

    result = agent_executor.invoke(input)

    # Collect every SQL statement the agent actually executed.
    queries = [
        action.tool_input
        for action, _ in result["intermediate_steps"]
        if action.tool == "sql_db_query"
    ]
    # Previously queries[0] raised IndexError when the agent never invoked
    # sql_db_query; fall back to an empty string in that case.
    result["sql"] = queries[0].replace("Observation", "") if queries else ""
    return result


class SQLExample(BaseModel):
    """Data model for a few-shot text-to-SQL example."""
    # Natural-language question the example answers.
    question: str
    # Reference SQL statement for the question.
    sql: str
    # Optional human-readable explanation of the SQL.
    explanation: Optional[str] = None

class SQLPromptGenerator:
    """Builds few-shot prompts for natural-language-to-SQL generation."""

    def __init__(self):
        self.examples = self._load_default_examples()

    def _load_default_examples(self) -> List[SQLExample]:
        """Load the built-in few-shot examples."""
        return [
            SQLExample(
                question="查找销售额最高的5个产品",
                sql="""
                SELECT p.product_name, SUM(s.amount) as total_sales 
                FROM sales s
                JOIN products p ON s.product_id = p.id
                GROUP BY p.product_name
                ORDER BY total_sales DESC
                LIMIT 5
                """,
                explanation="这个查询通过JOIN连接sales和products表,计算每个产品的总销售额并返回前5名"
            ),
            SQLExample(
                question="统计每个客户在过去30天的订单数量",
                sql="""
                SELECT c.customer_name, COUNT(o.order_id) as order_count
                FROM customers c
                LEFT JOIN orders o ON c.customer_id = o.customer_id
                WHERE o.order_date >= CURRENT_DATE - INTERVAL '30 days'
                GROUP BY c.customer_name
                ORDER BY order_count DESC
                """,
                explanation="使用LEFT JOIN确保包含没有订单的客户,WHERE子句限制时间范围"
            )
        ]

    def generate_prompt(self, query: str, schema: Dict) -> str:
        """Build the full prompt: schema summary, relevant examples, user query.

        Args:
            query: The user's natural-language question.
            schema: Mapping of table name to {"columns": [...], "relations": [...]}.
        """
        # Pick the examples most relevant to this query.
        relevant_examples = self._select_relevant_examples(query, n=2)

        # Assemble the prompt (template text is intentionally Chinese-facing).
        prompt = f"""作为一个SQL专家,请将以下自然语言查询转换为SQL。

        数据库Schema信息:
        {self._format_schema(schema)}

        相关示例:
        {self._format_examples(relevant_examples)}



        用户查询: {query}

        请按以下步骤处理:
        1. 分析查询需求
        2. 确定需要的表和字段
        3. 构建SQL查询
        4. 确保语法正确

        SQL查询:"""

        return prompt

    def _select_relevant_examples(self, query: str, n: int = 2) -> List[SQLExample]:
        """Select the n examples most relevant to *query*.

        TODO: rank by semantic similarity; currently returns the first n.
        """
        return self.examples[:n]

    def _format_schema(self, schema: Dict) -> str:
        """Render the schema mapping as a readable text block."""
        formatted = []
        for table, info in schema.items():
            columns = info.get("columns", [])
            relations = info.get("relations", [])

            formatted.append(f"表名: {table}")
            formatted.append("列:")
            for col in columns:
                formatted.append(f"  - {col['name']}: {col['type']}")

            if relations:
                formatted.append("关系:")
                for rel in relations:
                    formatted.append(f"  - {rel}")

            formatted.append("")

        return "\n".join(formatted)

    def _format_examples(self, examples: List[SQLExample]) -> str:
        """Render the examples as numbered question/SQL/explanation blocks."""
        formatted = []
        for i, example in enumerate(examples, 1):
            formatted.extend([
                f"示例 {i}:",
                f"问题: {example.question}",
                f"SQL: {example.sql}",
                f"解释: {example.explanation}",
                ""
            ])
        return "\n".join(formatted)

    def _load_examples(self) -> List[Dict]:
        """Legacy dict-based example loader (unused; kept for compatibility)."""
        return [
            {
                "question": "查找销售额最高的5个产品",
                "sql": """
                SELECT p.product_name, SUM(s.amount) as total_sales 
                FROM sales s
                JOIN products p ON s.product_id = p.id
                GROUP BY p.product_name
                ORDER BY total_sales DESC
                LIMIT 5
                """
            },
            # Add more examples here...
        ]

    def _get_relevant_examples(self, query: str) -> List[SQLExample]:
        """Return the examples most relevant to *query*.

        Bug fix: previously called the nonexistent ``self._select_examples``,
        which raised AttributeError; delegate to _select_relevant_examples.
        """
        return self._select_relevant_examples(query, n=2)


class SQLError(Exception):
    """Base class for all SQL-related errors."""


class SQLSyntaxError(SQLError):
    """Raised when a SQL statement is syntactically invalid."""


class TableNotFoundError(SQLError):
    """Raised when a referenced table does not exist."""


class ColumnNotFoundError(SQLError):
    """Raised when a referenced column does not exist."""

class SQLErrorHandler:
    """Turns SQL execution failures into structured reports with suggestions."""

    def handle_error(self, error: Exception, query: str, schema: Dict) -> Dict:
        """Build an error report for *error* raised while running *query*."""
        report = {
            "success": False,
            "error_type": type(error).__name__,
            "message": str(error),
            "suggestion": None
        }

        try:
            if isinstance(error, SQLSyntaxError):
                report["suggestion"] = self._handle_syntax_error(error, query)
            elif isinstance(error, TableNotFoundError):
                report["suggestion"] = self._suggest_similar_table(error, schema)
            elif isinstance(error, ColumnNotFoundError):
                report["suggestion"] = self._suggest_similar_column(error, schema)
            else:
                report["suggestion"] = "请检查输入并重试"
        except Exception as e:
            # Suggestion generation must never mask the original failure.
            logger.error(f"Error handling failed: {e}")
            report["suggestion"] = "发生未知错误"

        return report

    def _handle_syntax_error(self, error: SQLSyntaxError, query: str) -> str:
        """Suggest a fix for a syntax error (TODO: smarter diagnostics)."""
        return "SQL语法错误,请检查语法"

    def _suggest_similar_table(self, error: TableNotFoundError, schema: Dict) -> str:
        """List the available tables when the requested one is missing (TODO: fuzzy match)."""
        return f"表不存在,可用的表: {', '.join(schema.keys())}"

    def _suggest_similar_column(self, error: ColumnNotFoundError, schema: Dict) -> str:
        """Suggest checking the column name (TODO: fuzzy match)."""
        return "列不存在,请检查列名"


class SQLQueryOptimizer:
    """Rule-based SQL optimization pipeline.

    All individual passes are currently no-op placeholders; optimize_query
    wires them together so the pipeline is runnable end to end.
    """

    def optimize_query(self, sql: str, schema: Dict) -> str:
        """Run all optimization passes over *sql* and return the result.

        Previously this called add_index_hints / optimize_where_conditions,
        which did not exist (AttributeError), and the stub passes returned
        None, corrupting the pipeline.

        Args:
            sql: The SQL statement to optimize.
            schema: Schema information consulted by the passes.
        """
        # 1. Analyze the query execution plan.
        plan = self.analyze_query_plan(sql)

        # 2. Optimize JOIN ordering.
        sql = self.optimize_joins(sql, plan)

        # 3. Add appropriate index hints.
        sql = self.add_index_hints(sql, schema)

        # 4. Optimize WHERE-condition ordering.
        sql = self.optimize_where_conditions(sql)

        return sql

    def analyze_query_plan(self, sql: str) -> Dict:
        """Analyze the execution plan. TODO: placeholder — returns an empty plan."""
        return {}

    def optimize_joins(self, sql: str, plan: Dict) -> str:
        """Optimize JOIN order. TODO: placeholder — returns *sql* unchanged."""
        return sql

    def add_index_hints(self, sql: str, schema: Dict) -> str:
        """Add index hints. TODO: placeholder — returns *sql* unchanged."""
        return sql

    def optimize_where_conditions(self, sql: str) -> str:
        """Reorder WHERE conditions. TODO: placeholder — returns *sql* unchanged."""
        return sql



class SQLResultExplainer:
    """Packages a query result with an explanation, visualization hints, and insights."""

    def explain_result(self, sql: str, result: Any, original_query: str) -> Dict:
        """Assemble the explanation payload for an executed query."""
        payload = {"sql": sql, "result": result}
        payload["explanation"] = self.generate_explanation(result, original_query)
        payload["visualization_suggestion"] = self.suggest_visualization(result)
        payload["insights"] = self.extract_insights(result)
        return payload

    def generate_explanation(self, result: Any, query: str) -> str:
        """Produce a natural-language explanation. TODO: not implemented (returns None)."""
        return None

    def suggest_visualization(self, result: Any) -> Dict:
        """Recommend a suitable visualization. TODO: not implemented (returns None)."""
        return None

    def extract_insights(self, result: Any) -> List[str]:
        """Extract data insights. TODO: not implemented (returns None)."""
        return None

class QueryCache:
    """In-memory TTL cache for SQL query results.

    Caches the results of frequent queries to avoid repeated work and
    improve response time.

    Attributes:
        cache: Backing dict mapping hashed keys to entries.
        ttl: Entry lifetime in seconds.
        max_size: Maximum number of entries held at once.
    """

    def __init__(self, ttl: int = 3600, max_size: int = 1000):
        self.cache = {}
        self.ttl = ttl
        self.max_size = max_size
        self.hits = 0
        self.misses = 0

    def get(self, key: str) -> Optional[Dict]:
        """Return the cached result for *key*, or None when absent or expired.

        Args:
            key: Unique identifier of the query.

        Returns:
            Optional[Dict]: The cached result, or None.
        """
        hashed = self._generate_key(key)
        entry = self.cache.get(hashed)

        if not entry:
            self.misses += 1
            return None

        # Expired entries are dropped lazily, on access.
        if self._is_expired(entry["timestamp"]):
            self._remove(hashed)
            self.misses += 1
            return None

        self.hits += 1
        return entry["result"]

    def set(self, key: str, value: Dict):
        """Store *value* under *key*, evicting the oldest entry when full.

        Args:
            key: Unique identifier of the query.
            value: Result to cache.
        """
        # Make room before inserting.
        if len(self.cache) >= self.max_size:
            self._evict_oldest()

        self.cache[self._generate_key(key)] = {
            "result": value,
            "timestamp": time.time(),
            "key": key,  # original key kept for debugging
        }

    def clear_expired(self):
        """Drop every entry whose TTL has elapsed."""
        stale = [k for k, v in self.cache.items() if self._is_expired(v["timestamp"])]
        for k in stale:
            self._remove(k)

    def get_stats(self) -> Dict:
        """Return cache size, hit/miss counters, hit rate, and configuration."""
        requests = self.hits + self.misses
        return {
            "size": len(self.cache),
            "max_size": self.max_size,
            "hits": self.hits,
            "misses": self.misses,
            "hit_rate": self.hits / requests if requests > 0 else 0,
            "ttl": self.ttl,
        }

    def _generate_key(self, key: str) -> str:
        """Hash the raw key (MD5 hex) so cache keys stay short and uniform."""
        return hashlib.md5(key.encode()).hexdigest()

    def _is_expired(self, timestamp: float) -> bool:
        """True when *timestamp* is older than the configured TTL."""
        return time.time() - timestamp > self.ttl

    def _remove(self, key: str):
        """Delete *key* from the cache if present."""
        if key in self.cache:
            del self.cache[key]

    def _evict_oldest(self):
        """Evict the entry with the smallest timestamp, if any."""
        if not self.cache:
            return
        oldest = min(self.cache, key=lambda k: self.cache[k]["timestamp"])
        self._remove(oldest)

class PerformanceOptimizer:
    """Chains SQL performance-optimization passes (currently no-op placeholders)."""

    def optimize(self, sql: str, schema: Dict) -> str:
        """Apply every optimization pass in order and return the rewritten SQL.

        Previously two of the listed passes did not exist (AttributeError)
        and the stubs returned None, so the pipeline could never complete.
        """
        optimizations = [
            self._optimize_select_columns,
            self._optimize_join_order,
            self._optimize_where_conditions,
            self._add_index_hints
        ]

        for opt in optimizations:
            sql = opt(sql, schema)

        return sql

    def _optimize_select_columns(self, sql: str, schema: Dict) -> str:
        """Prune selected columns. TODO: placeholder — returns *sql* unchanged."""
        return sql

    def _optimize_join_order(self, sql: str, schema: Dict) -> str:
        """Reorder JOINs. TODO: placeholder — returns *sql* unchanged."""
        return sql

    def _optimize_where_conditions(self, sql: str, schema: Dict) -> str:
        """Reorder WHERE conditions. TODO: placeholder — returns *sql* unchanged."""
        return sql

    def _add_index_hints(self, sql: str, schema: Dict) -> str:
        """Add index hints. TODO: placeholder — returns *sql* unchanged."""
        return sql


class EnhancedTextToSQL:
    """End-to-end text-to-SQL pipeline: caching, schema extraction, SQL
    generation, analysis, optimization, execution, validation, explanation.

    NOTE(review): several collaborators used below (NLGenerator,
    ResultValidator, and a ``generate_sql`` method) are not defined anywhere
    visible in this file — instantiating or running this class will raise
    NameError/AttributeError until they exist. Confirm against the rest of
    the project.
    """
    def __init__(self):
        self.prompt_generator = SQLPromptGenerator()
        self.query_optimizer = SQLQueryOptimizer()
        self.error_handler = SQLErrorHandler()
        self.result_explainer = SQLResultExplainer()
        self.query_cache = QueryCache()
        self.schema_manager = SchemaManager()
        self.query_analyzer = QueryAnalyzer()
        self.result_validator = ResultValidator()  # NOTE(review): not defined in this file
        self.performance_optimizer = PerformanceOptimizer()
        self.nl_generator = NLGenerator()  # NOTE(review): not defined in this file
        
    async def execute(self, input_text: str, db_url: str) -> Dict:
        """Run the full pipeline for *input_text* against the DB at *db_url*.

        Returns the assembled output dict on success, or an error report from
        SQLErrorHandler on failure.
        """
        try:
            # 1. Cache check
            if cached := self.query_cache.get(input_text):
                return cached
                
            # 2. Connect to the database
            db = SQLDatabase.from_uri(db_url)
            
            # 3. Fetch the schema
            schema = await self.schema_manager.get_schema(db)
            
            # 4. Generate SQL
            # NOTE(review): generate_sql is not defined on this class.
            sql = await self.generate_sql(input_text, schema)
            
            # 5. Analyze the query
            analysis = self.query_analyzer.analyze_query(sql)
            
            # 6. Performance optimization
            # NOTE(review): analysis["complexity"] is a dict (vars of a
            # QueryComplexity); comparing it to the string "simple" with ">"
            # raises TypeError — confirm the intended complexity check.
            if analysis["complexity"] > "simple":
                sql = self.performance_optimizer.optimize(sql, schema)
                
            # 7. Execute the query
            # NOTE(review): SQLDatabase.run_no_throw appears synchronous;
            # awaiting its return value would fail — verify against the
            # installed langchain version.
            result = await db.run_no_throw(sql)
            
            # 8. Validate the result
            if not self.result_validator.validate_result(sql, result):
                raise ValueError("Invalid query result")
                
            # 9. Generate the explanation
            explanation = self.nl_generator.generate_explanation(sql, result)
            
            # 10. Assemble the output
            output = {
                "sql": sql,
                "result": result,
                "explanation": explanation,
                "analysis": analysis
            }
            
            # 11. Cache the result
            self.query_cache.set(input_text, output)
            
            return output
            
        except Exception as e:
            # NOTE(review): sql/schema may be unbound here when the failure
            # happened before they were assigned (steps 1-3), which raises a
            # secondary NameError from the handler call.
            return self.error_handler.handle_error(e, sql, schema)


class SQLMetricsCollector:
    """Collects execution metrics for a completed query.

    NOTE(review): calculate_complexity, calculate_cache_hit_rate, and
    calculate_error_rate are not defined on this class anywhere visible in
    this file — calling collect_metrics will raise AttributeError until
    they exist.
    """
    def collect_metrics(self, query_info: Dict):
        # Expects query_info to carry 'execution_time', 'sql', and 'result'.
        metrics = {
            "query_time": query_info["execution_time"],
            "query_complexity": self.calculate_complexity(query_info["sql"]),
            "result_size": len(query_info["result"]),
            "cache_hit_rate": self.calculate_cache_hit_rate(),
            "error_rate": self.calculate_error_rate()
        }
        return metrics



@dataclass
class ColumnInfo:
    """Column metadata extracted from the database schema."""
    name: str
    type: str
    nullable: bool
    primary_key: bool
    # Foreign-key target when this column references another table, else None.
    foreign_key: Optional[str] = None
    description: Optional[str] = None

@dataclass
class TableRelation:
    """A foreign-key relationship between two tables."""
    from_table: str
    from_column: str
    to_table: str
    to_column: str
    relation_type: str  # 'ONE_TO_ONE', 'ONE_TO_MANY', 'MANY_TO_ONE'

class SchemaManager:
    """Database schema manager.

    Extracts and caches database schema information, including:
    - table structure
    - column details
    - table relationships
    - estimated row counts
    """

    def __init__(self, refresh_interval: int = 3600):
        # db_id -> extracted schema dict
        self.schema_cache = {}
        # Seconds a cached schema stays fresh.
        self.refresh_interval = refresh_interval
        # db_id -> datetime of the last extraction
        self.last_refresh = {}

    async def get_schema(self, db: Any) -> Dict:
        """Return the (possibly cached) schema information for *db*.

        Args:
            db: Database connection object (engine, or wrapper exposing .engine).

        Returns:
            Dict: The complete schema information.
        """
        db_id = self._get_db_id(db)

        # Re-extract when the cached copy is missing or stale.
        if self._need_refresh(db_id):
            schema = await self._extract_schema(db)
            self.schema_cache[db_id] = schema
            self.last_refresh[db_id] = datetime.now()
            return schema

        return self.schema_cache[db_id]

    async def _extract_schema(self, db: Any) -> Dict:
        """Extract schema information via SQLAlchemy inspection."""
        try:
            engine = db.engine if hasattr(db, 'engine') else db
            inspector = inspect(engine)

            schema_info = {
                "tables": {},
                "relations": [],
                "metadata": {
                    "extracted_at": datetime.now().isoformat(),
                    "database": str(engine.url.database)
                }
            }

            # All table names in the default schema.
            tables = inspector.get_table_names()

            # Per-table details.
            for table in tables:
                table_info = await self._extract_table_info(inspector, table)
                schema_info["tables"][table] = table_info

            # Foreign-key relationships between tables.
            relations = self._extract_relations(inspector, tables)
            schema_info["relations"] = relations

            return schema_info

        except Exception as e:
            logger.error(f"Schema extraction failed: {e}")
            raise

    async def _extract_table_info(self, inspector: Any, table: str) -> Dict:
        """Extract detailed information for a single table."""
        columns = []
        primary_keys = set(inspector.get_pk_constraint(table)['constrained_columns'])

        for col in inspector.get_columns(table):
            column = ColumnInfo(
                name=col['name'],
                type=str(col['type']),
                nullable=col.get('nullable', True),
                primary_key=col['name'] in primary_keys
            )
            columns.append(vars(column))

        # Index metadata.
        indexes = inspector.get_indexes(table)

        # Table comment, when the dialect supports it.
        table_comment = None
        try:
            table_comment = inspector.get_table_comment(table)
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; unsupported dialects simply get None.
            pass

        # NOTE(review): inspector.bind assumes the Inspector exposes its
        # engine — confirm against the installed SQLAlchemy version.
        return {
            "columns": columns,
            "indexes": indexes,
            "comment": table_comment,
            "estimated_row_count": await self._get_estimated_row_count(inspector.bind, table)
        }

    def _extract_relations(self, inspector: Any, tables: List[str]) -> List[Dict]:
        """Extract inter-table relationships from foreign keys."""
        relations = []

        for table in tables:
            for fk in inspector.get_foreign_keys(table):
                relation = TableRelation(
                    from_table=table,
                    from_column=fk['constrained_columns'][0],
                    to_table=fk['referred_table'],
                    to_column=fk['referred_columns'][0],
                    relation_type=self._determine_relation_type(
                        inspector, table, fk['referred_table']
                    )
                )
                relations.append(vars(relation))

        return relations

    async def _get_estimated_row_count(self, engine: Engine, table: str) -> Optional[int]:
        """Return an estimated row count for *table* (PostgreSQL only, else None)."""
        try:
            with engine.connect() as conn:
                # PostgreSQL-specific statistics lookup. The table name comes
                # from SQLAlchemy inspection (not user input), but binding it
                # as a query parameter would still be safer than an f-string.
                if engine.name == 'postgresql':
                    result = conn.execute(text(
                        f"SELECT reltuples::bigint AS estimate FROM pg_class "
                        f"WHERE relname = '{table}'"
                    ))
                    row = result.fetchone()
                    return row[0] if row else None
                return None
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; estimate is best-effort only.
            return None

    def _determine_relation_type(self, inspector: Any, from_table: str, to_table: str) -> str:
        """Classify the relation between two tables (simple heuristic)."""
        from_pks = set(inspector.get_pk_constraint(from_table)['constrained_columns'])
        to_pks = set(inspector.get_pk_constraint(to_table)['constrained_columns'])

        # Single-column primary keys on both sides -> assume one-to-many.
        if len(from_pks) == 1 and len(to_pks) == 1:
            return 'ONE_TO_MANY'  # default assumption
        return 'UNKNOWN'

    def _get_db_id(self, db: Any) -> str:
        """Build a stable identifier for the database connection."""
        if hasattr(db, 'engine'):
            return str(db.engine.url)
        return str(db.url if hasattr(db, 'url') else id(db))

    def _need_refresh(self, db_id: str) -> bool:
        """True when the cached schema for *db_id* is missing or stale."""
        if db_id not in self.last_refresh:
            return True

        elapsed = (datetime.now() - self.last_refresh[db_id]).total_seconds()
        return elapsed > self.refresh_interval

    def get_table_info(self, db_id: str, table: str) -> Optional[Dict]:
        """Return cached info for one table, or None when not cached."""
        schema = self.schema_cache.get(db_id)
        if not schema:
            return None
        return schema["tables"].get(table)

    def get_related_tables(self, db_id: str, table: str) -> Set[str]:
        """Return every table related to *table* via a cached relation."""
        schema = self.schema_cache.get(db_id)
        if not schema:
            return set()

        related = set()
        for relation in schema["relations"]:
            if relation["from_table"] == table:
                related.add(relation["to_table"])
            elif relation["to_table"] == table:
                related.add(relation["from_table"])

        return related

@dataclass
class QueryComplexity:
    """Result of a query complexity assessment."""
    level: str  # one of 'SIMPLE', 'MEDIUM', 'COMPLEX'
    score: float  # complexity score in the 0-100 range
    factors: List[str]  # human-readable factors that contributed to the score

@dataclass
class QueryComponent:
    """Structural parts extracted from a SQL query."""
    tables: Set[str]  # tables referenced by the query
    columns: Set[str]  # columns used (not populated by QueryAnalyzer's extractor in this file)
    conditions: List[str]  # WHERE predicates plus AND/OR connectors
    joins: List[str]  # JOIN clauses (join keyword plus joined table)
    aggregations: List[str]  # aggregate function expressions
    group_by: List[str]  # GROUP BY columns
    order_by: List[str]  # ORDER BY columns
    limit: Optional[int]  # LIMIT value, if present

class QueryAnalyzer:
    """SQL query analyzer.

    Parses a SQL statement with ``sqlparse`` and reports its structural
    components, an estimated 0-100 complexity score, detected potential
    issues, and optimization suggestions.
    """

    def __init__(self):
        # Weight each structural feature contributes to the complexity score.
        self.complexity_weights = {
            'table_count': 10,
            'join_count': 15,
            'condition_count': 5,
            'aggregation_count': 8,
            'subquery_count': 20,
            'union_count': 12
        }

    def analyze_query(self, sql: str) -> Dict:
        """Analyze a SQL query.

        Args:
            sql: the SQL statement to analyze.

        Returns:
            Dict with keys ``components`` (extracted query parts),
            ``complexity`` (level/score/factors), ``issues`` (detected
            problems) and ``suggestions`` (optimization hints).

        Raises:
            Exception: any parse/analysis error is logged and re-raised.
        """
        try:
            # sqlparse.parse() returns one entry per statement; only the
            # first statement is analyzed.
            parsed = sqlparse.parse(sql)[0]

            components = self._extract_components(parsed)
            complexity = self._evaluate_complexity(parsed, components)
            issues = self._check_issues(parsed, components)

            return {
                "components": vars(components),
                "complexity": vars(complexity),
                "issues": issues,
                "suggestions": self._generate_suggestions(components, complexity, issues)
            }

        except Exception as e:
            logger.error(f"Query analysis failed: {e}")
            raise

    @staticmethod
    def _keyword_text(token: Any) -> str:
        """Uppercase a keyword token and collapse internal whitespace.

        Makes 'GROUP   BY' compare equal to 'GROUP BY'; the previous exact
        string comparison missed irregular whitespace.
        """
        return ' '.join(token.value.upper().split())

    def _extract_components(self, parsed: TokenList) -> QueryComponent:
        """Walk the top-level token stream and collect query components."""
        components = QueryComponent(
            tables=set(),
            columns=set(),
            conditions=[],
            joins=[],
            aggregations=[],
            group_by=[],
            order_by=[],
            limit=None
        )

        for token in parsed.tokens:
            keyword = self._keyword_text(token) if token.ttype is Keyword else ''

            # Table names: identifiers qualified by a parent name.
            if isinstance(token, Identifier):
                if token.get_parent_name():
                    components.tables.add(token.get_parent_name())

            # WHERE conditions.
            elif isinstance(token, Where):
                components.conditions.extend(self._extract_conditions(token))

            # JOIN clauses: record the join keyword plus the joined table.
            elif token.ttype is Keyword and 'JOIN' in keyword:
                next_token = parsed.token_next(parsed.token_index(token))[1]
                if next_token:
                    components.joins.append(f"{token.value} {next_token.value}")

            # Aggregate functions in grouped tokens (ttype None).
            # NOTE(review): an aliased aggregate ('COUNT(x) AS n') parses as
            # an Identifier and is consumed by the first branch, so it is
            # not recorded here — confirm against sqlparse's parse tree.
            elif token.ttype is None and token.get_name():
                if any(agg in token.value.upper() for agg in ['COUNT', 'SUM', 'AVG', 'MAX', 'MIN']):
                    components.aggregations.append(token.value)

            # GROUP BY columns.
            elif keyword == 'GROUP BY':
                next_token = parsed.token_next(parsed.token_index(token))[1]
                if next_token:
                    components.group_by.extend(str(next_token).split(','))

            # ORDER BY columns.
            elif keyword == 'ORDER BY':
                next_token = parsed.token_next(parsed.token_index(token))[1]
                if next_token:
                    components.order_by.extend(str(next_token).split(','))

            # LIMIT value.
            elif keyword == 'LIMIT':
                next_token = parsed.token_next(parsed.token_index(token))[1]
                if next_token and next_token.ttype is not Punctuation:
                    try:
                        components.limit = int(str(next_token))
                    except ValueError:
                        # Non-numeric LIMIT expression (e.g. 'LIMIT 10, 5');
                        # leave limit unset instead of aborting the analysis.
                        pass

        return components

    def _extract_conditions(self, where_clause: Where) -> List[str]:
        """Extract comparison expressions and AND/OR connectors from WHERE."""
        conditions = []
        for token in where_clause.tokens:
            if isinstance(token, Comparison):
                conditions.append(str(token))
            elif token.ttype is Keyword and token.value.upper() in ['AND', 'OR']:
                conditions.append(token.value)
        return conditions

    def _evaluate_complexity(self, parsed: TokenList, components: QueryComponent) -> QueryComplexity:
        """Score the query's complexity and classify it.

        The score is a weighted sum of table, join, condition, aggregation
        and subquery counts, capped at 100.
        """
        score = 0
        factors = []

        score += len(components.tables) * self.complexity_weights['table_count']
        if len(components.tables) > 1:
            factors.append(f"涉及{len(components.tables)}张表")

        score += len(components.joins) * self.complexity_weights['join_count']
        if components.joins:
            factors.append(f"包含{len(components.joins)}个JOIN")

        score += len(components.conditions) * self.complexity_weights['condition_count']
        if components.conditions:
            factors.append(f"包含{len(components.conditions)}个条件")

        score += len(components.aggregations) * self.complexity_weights['aggregation_count']
        if components.aggregations:
            factors.append(f"使用{len(components.aggregations)}个聚合函数")

        # Count subqueries as the number of SELECT keywords beyond the
        # outermost one. The previous heuristic counted every grouped token
        # (identifiers, the WHERE clause, function calls) as a subquery,
        # which wildly inflated the score.
        select_count = sum(
            1 for t in parsed.flatten()
            if t.ttype is DML and t.value.upper() == 'SELECT'
        )
        subquery_count = max(0, select_count - 1)
        score += subquery_count * self.complexity_weights['subquery_count']
        if subquery_count > 0:
            factors.append(f"包含{subquery_count}个子查询")

        # Map the raw score onto three coarse levels.
        if score < 30:
            level = 'SIMPLE'
        elif score < 70:
            level = 'MEDIUM'
        else:
            level = 'COMPLEX'

        return QueryComplexity(level=level, score=min(score, 100), factors=factors)

    def _check_issues(self, parsed: TokenList, components: QueryComponent) -> List[Dict]:
        """Detect likely problems: cartesian joins, SELECT *, odd GROUP BY."""
        issues = []

        # Fewer join clauses than tables-1 suggests a cartesian product.
        if len(components.tables) > 1 and len(components.joins) < len(components.tables) - 1:
            issues.append({
                "type": "CARTESIAN_JOIN",
                "severity": "HIGH",
                "message": "查询可能产生笛卡尔积"
            })

        # SELECT * fetches every column.
        if any(token.value == '*' for token in parsed.tokens):
            issues.append({
                "type": "SELECT_ALL",
                "severity": "MEDIUM",
                "message": "使用SELECT *可能影响性能"
            })

        # GROUP BY without any aggregate is usually a mistake.
        if components.group_by and not components.aggregations:
            issues.append({
                "type": "GROUP_WITHOUT_AGGREGATE",
                "severity": "LOW",
                "message": "GROUP BY子句但没有聚合函数"
            })

        return issues

    def _generate_suggestions(
        self, 
        components: QueryComponent, 
        complexity: QueryComplexity, 
        issues: List[Dict]
    ) -> List[str]:
        """Turn the complexity rating and detected issues into advice."""
        suggestions = []

        # Complexity-driven advice.
        if complexity.level == 'COMPLEX':
            suggestions.append("考虑拆分复杂查询为多个简单查询")

        # Issue-driven advice.
        for issue in issues:
            if issue["type"] == "CARTESIAN_JOIN":
                suggestions.append("添加适当的JOIN条件避免笛卡尔积")
            elif issue["type"] == "SELECT_ALL":
                suggestions.append("明确指定需要的列而不是使用SELECT *")

        # Component-driven advice.
        if len(components.tables) > 3:
            suggestions.append("考虑是否所有表关联都必要")

        if not components.limit and complexity.level != 'SIMPLE':
            suggestions.append("考虑添加LIMIT子句限制结果集大小")

        return suggestions

class ResultValidator:
    """Sanity-checks a query result.

    The original implementation called two methods that were never
    defined (``_check_data_types`` / ``_check_value_ranges``), raising
    AttributeError, and the defined checks returned None so validation
    could never pass. All four checks are now implemented as permissive
    placeholders: they only reject results that are clearly pathological.
    Tighten them as real validation rules become available.
    """

    # Upper bound on a "reasonable" result-set size; larger is suspicious.
    MAX_ROWS = 100_000
    # Maximum tolerated fraction of NULL (None) cells.
    MAX_NULL_RATIO = 0.9

    def validate_result(self, sql: str, result: Any) -> bool:
        """Return True when *result* passes every sanity check.

        Args:
            sql: the query that produced *result* (reserved for future
                schema-aware checks).
            result: the result value; typically a list of row tuples.
        """
        return (
            self._check_result_size(result)
            and self._check_null_values(result)
            and self._check_data_types(result)
            and self._check_value_ranges(result)
        )

    def _check_result_size(self, result: Any) -> bool:
        """Reject absurdly large result sets; accept non-sized scalars."""
        try:
            return len(result) <= self.MAX_ROWS
        except TypeError:
            return True

    def _check_null_values(self, result: Any) -> bool:
        """Reject results that are almost entirely NULL."""
        if not isinstance(result, (list, tuple)) or not result:
            return True
        cells = [
            cell
            for row in result
            for cell in (row if isinstance(row, (list, tuple)) else (row,))
        ]
        if not cells:
            return True
        null_ratio = sum(1 for cell in cells if cell is None) / len(cells)
        return null_ratio <= self.MAX_NULL_RATIO

    def _check_data_types(self, result: Any) -> bool:
        """Placeholder: accept all data types until schema info is wired in."""
        return True

    def _check_value_ranges(self, result: Any) -> bool:
        """Placeholder: accept all value ranges until domain rules exist."""
        return True


class NLGenerator:
    """Generates a natural-language explanation of a query result.

    The original implementation called ``_fill_template``, which was never
    defined (AttributeError), and its helpers returned None. This version
    implements a minimal deterministic pipeline: summarize the result into
    a context dict, pick a template by result shape, then interpolate.
    """

    def generate_explanation(self, sql: str, result: Any) -> str:
        """Return a short English explanation of *result* for *sql*."""
        context = self._analyze_context(sql, result)
        template = self._select_template(context)
        return self._fill_template(template, context)

    def _analyze_context(self, sql: str, result: Any) -> Dict:
        """Summarize the query and its result into a template context."""
        if result is None:
            row_count = 0
        elif isinstance(result, (list, tuple)):
            row_count = len(result)
        else:
            # A scalar result counts as a single row.
            row_count = 1
        return {"sql": sql.strip(), "row_count": row_count, "result": result}

    def _select_template(self, context: Dict) -> str:
        """Pick an explanation template based on the result shape."""
        if context["row_count"] == 0:
            return "The query returned no rows."
        return "The query returned {row_count} row(s)."

    def _fill_template(self, template: str, context: Dict) -> str:
        """Interpolate *context* values into *template*."""
        return template.format(**context)


# Usage example / manual smoke test for the components in this file.
if __name__ == "__main__":
    # Sample schema used by the prompt-generation and error-handling demos.
    sample_schema = {
        "customers": {
            "columns": [
                {"name": "customer_id", "type": "integer"},
                {"name": "customer_name", "type": "varchar"},
                {"name": "email", "type": "varchar"}
            ],
            "relations": [
                "customers.customer_id -> orders.customer_id"
            ]
        },
        "orders": {
            "columns": [
                {"name": "order_id", "type": "integer"},
                {"name": "customer_id", "type": "integer"},
                {"name": "order_date", "type": "timestamp"}
            ],
            "relations": [
                "orders.customer_id -> customers.customer_id"
            ]
        }
    }
    
    # Initialize components (SQLPromptGenerator / SQLErrorHandler are
    # defined elsewhere in this file).
    prompt_generator = SQLPromptGenerator()
    error_handler = SQLErrorHandler()
    
    # Demo: prompt generation.
    query = "找出下单次数最多的前10名客户"
    prompt = prompt_generator.generate_prompt(query, sample_schema)
    print("生成的提示词:")
    print(prompt)
    print("\n" + "="*50 + "\n")
    
    # Demo: error handling for a missing table.
    try:
        raise TableNotFoundError("Table 'unknown_table' not found")
    except Exception as e:
        error_result = error_handler.handle_error(e, "SELECT * FROM unknown_table", sample_schema)
        print("错误处理结果:")
        print(error_result)

    # Create a cache instance (60s TTL, at most 100 entries).
    cache = QueryCache(ttl=60, max_size=100)
    
    # Test data for the cache demo.
    test_query = "查找销售额最高的产品"
    test_result = {
        "sql": "SELECT product_name, SUM(amount) FROM sales GROUP BY product_name ORDER BY SUM(amount) DESC LIMIT 1",
        "result": [("产品A", 1000)],
        "execution_time": 0.5
    }
    
    # Store an entry.
    cache.set(test_query, test_result)
    
    # Read it back.
    cached_result = cache.get(test_query)
    print("缓存的结果:", cached_result)
    
    # Show cache statistics.
    stats = cache.get_stats()
    print("\n缓存统计:", json.dumps(stats, indent=2))
    
    # Expiry check.
    # NOTE(review): this blocks the demo for over a minute; consider a
    # shorter TTL or an injectable clock for faster manual runs.
    time.sleep(61)  # wait for the cache entry to expire
    expired_result = cache.get(test_query)
    print("\n过期后获取结果:", expired_result)

    from sqlalchemy import create_engine
    import json
    
    # Create a test database connection.
    # NOTE(review): hard-coded credentials; the steps below fail unless a
    # matching PostgreSQL instance is actually running at this address.
    engine = create_engine("postgresql://user:password@localhost:5432/testdb")
    
    # Initialize the SchemaManager (defined elsewhere in this file).
    schema_manager = SchemaManager()
    
    # Usage from an async context.
    async def test_schema_manager():
        """Exercise SchemaManager: full schema, one table, related tables."""
        # Fetch the full schema.
        schema = await schema_manager.get_schema(engine)
        
        # Print it.
        print("数据库Schema信息:")
        print(json.dumps(schema, indent=2))
        
        # Fetch info for one specific table.
        table_info = schema_manager.get_table_info(
            schema_manager._get_db_id(engine),
            "customers"
        )
        print("\n客户表信息:")
        print(json.dumps(table_info, indent=2))
        
        # Fetch the tables related to it.
        related = schema_manager.get_related_tables(
            schema_manager._get_db_id(engine),
            "customers"
        )
        print("\n相关表:")
        print(related)

    # Run the async demo.
    import asyncio
    asyncio.run(test_schema_manager())

    # Initialize the query analyzer.
    analyzer = QueryAnalyzer()
    
    # Queries of increasing complexity.
    test_queries = [
        # Simple query
        """
        SELECT customer_name, email 
        FROM customers 
        WHERE status = 'active'
        """,
        
        # Medium-complexity query
        """
        SELECT c.customer_name, COUNT(o.order_id) as order_count
        FROM customers c
        LEFT JOIN orders o ON c.customer_id = o.customer_id
        WHERE o.order_date >= '2024-01-01'
        GROUP BY c.customer_name
        HAVING COUNT(o.order_id) > 5
        ORDER BY order_count DESC
        LIMIT 10
        """,
        
        # Complex query
        """
        SELECT 
            d.department_name,
            e.employee_name,
            COUNT(o.order_id) as order_count,
            SUM(o.total_amount) as total_sales
        FROM departments d
        JOIN employees e ON d.dept_id = e.dept_id
        LEFT JOIN orders o ON e.emp_id = o.sales_rep_id
        JOIN customers c ON o.customer_id = c.customer_id
        WHERE o.order_date BETWEEN '2024-01-01' AND '2024-12-31'
        AND c.region IN ('North', 'South')
        GROUP BY d.department_name, e.employee_name
        HAVING COUNT(o.order_id) > 100
        ORDER BY total_sales DESC
        """
    ]
    
    # Analyze each query and print the report.
    for i, query in enumerate(test_queries, 1):
        print(f"\n分析查询 {i}:")
        print("-" * 50)
        print(f"SQL: {query.strip()}")
        
        analysis = analyzer.analyze_query(query)
        
        print("\n复杂度:")
        print(f"级别: {analysis['complexity']['level']}")
        print(f"分数: {analysis['complexity']['score']}")
        print(f"因素: {', '.join(analysis['complexity']['factors'])}")
        
        if analysis['issues']:
            print("\n潜在问题:")
            for issue in analysis['issues']:
                print(f"- [{issue['severity']}] {issue['message']}")
        
        if analysis['suggestions']:
            print("\n优化建议:")
            for suggestion in analysis['suggestions']:
                print(f"- {suggestion}")
        
        print("\n" + "=" * 50)