import os
import re
import torch
import sqlparse
from typing import List, Dict, Any
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

from app.core.db_engine import query_documents, embedding_model
from app.core.config import settings
from app.models.text2sql import SqlResponse, EvaluationResult

# Module-level cache for the lazily-loaded Text2SQL model (see init_model/get_model).
model = None

# Lazily load the shared Text2SQL model.
def init_model():
    """Load the HuggingFace seq2seq model into the module-global `model`.

    Idempotent: returns immediately when the model is already loaded.
    """
    global model
    if model is not None:
        return
    loaded = AutoModelForSeq2SeqLM.from_pretrained(
        settings.TEXT2SQL_MODEL,
        torch_dtype=torch.float32,
    )
    # Pin inference to CPU for compatibility (e.g. Windows environments).
    loaded.to("cpu")
    model = loaded

# Accessor for the shared Text2SQL model.
def get_model():
    """Return the module-global model, loading it on first access."""
    global model
    # init_model() is idempotent, so it is safe to call unconditionally.
    init_model()
    return model

# Generate a SQL query from a natural-language question (RAG-style).
async def generate_sql(question: str, context: str = None) -> SqlResponse:
    """Translate *question* into SQL with the seq2seq model.

    Args:
        question: Natural-language question to convert.
        context: Optional schema/context text. When omitted (or empty), the
            top-3 documents retrieved for the question are joined and used.

    Returns:
        SqlResponse carrying the generated SQL, a placeholder confidence
        score, and the retrieved context documents.
    """
    model = get_model()
    tokenizer = AutoTokenizer.from_pretrained(settings.TEXT2SQL_MODEL)

    # Retrieve supporting documents to ground the generation.
    relevant_docs = await query_documents(question)

    # Fall back to retrieved documents when no explicit context was supplied.
    if not context:
        context = "\n".join([doc['document'] for doc in relevant_docs[:3]])

    # Build the prompt fed to the seq2seq model.
    input_text = f"""Convert the following natural language question to SQL query based on the given context.
    Context: {context}
    Question: {question}
    SQL:"""

    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
    # FIX: the original passed `temperature=0.7` without `do_sample=True`;
    # with beam search and sampling disabled, temperature is a no-op and
    # only triggers a transformers warning, so it is removed. The attention
    # mask is passed explicitly instead of relying on the pad-token heuristic.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=256,
        num_beams=5,
        early_stopping=True,
    )

    generated_sql = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # TODO: derive a real confidence from the model output (e.g. sequence
    # log-probability); 0.8 is a placeholder value.
    confidence_score = 0.8

    return SqlResponse(
        question=question,
        generated_sql=generated_sql,
        confidence_score=confidence_score,
        context_documents=relevant_docs
    )

# Evaluate a generated SQL query, optionally against a reference query.
def evaluate_sql(question: str, generated_sql: str, expected_sql: str = None) -> EvaluationResult:
    """Score *generated_sql* by structural similarity to *expected_sql*.

    Correctness is decided by comparing the similarity score to
    settings.EVAL_THRESHOLD.
    """
    # Parse both statements (None when the corresponding SQL is missing).
    gen_stmt = sqlparse.parse(generated_sql)[0] if generated_sql else None
    exp_stmt = sqlparse.parse(expected_sql)[0] if expected_sql else None

    # Extract comparable element sets from each statement.
    gen_parts = extract_sql_elements(gen_stmt)
    exp_parts = extract_sql_elements(exp_stmt)

    score = calculate_similarity(gen_parts, exp_parts)

    details: Dict[str, Any] = {
        "generated_elements": gen_parts,
        "expected_elements": exp_parts,
        "similarity_metrics": {
            "overall": score,
        },
    }

    # With a reference query available, also record a clause-order match.
    if expected_sql:
        details["similarity_metrics"]["structure_match"] = compare_sql_structure(generated_sql, expected_sql)

    return EvaluationResult(
        question=question,
        generated_sql=generated_sql,
        expected_sql=expected_sql,
        similarity_score=score,
        is_correct=score >= settings.EVAL_THRESHOLD,
        evaluation_details=details
    )

# Extract the salient elements of a parsed SQL statement.
def extract_sql_elements(parsed_sql) -> Dict[str, Any]:
    """Pull tokens, keywords, tables, columns, selects and aggregations
    out of *parsed_sql* (a sqlparse statement-like object exposing
    `.tokens` and `.value`). Returns an empty dict for a falsy input.

    NOTE(review): table/column extraction is regex-based and intentionally
    naive; complex queries (joins, subqueries) are only partially covered.
    """
    if not parsed_sql:
        return {}

    elements: Dict[str, Any] = {
        "tokens": [],
        "keywords": [],
        "tables": set(),
        "columns": set(),
        "where_clauses": [],
        "joins": [],
        "selects": [],
        "aggregations": [],
    }

    # Recognized SQL keywords and aggregate functions.
    sql_keywords = {
        "SELECT", "FROM", "WHERE", "JOIN", "LEFT", "RIGHT", "INNER", "OUTER",
        "ON", "AND", "OR", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP",
        "ALTER", "TABLE", "INDEX", "VIEW", "GROUP BY", "ORDER BY", "HAVING",
        "LIMIT", "OFFSET",
    }
    agg_funcs = ("COUNT", "SUM", "AVG", "MIN", "MAX")

    # Walk the top-level token stream, skipping whitespace-only tokens.
    for tok in parsed_sql.tokens:
        text = tok.value.strip().upper()
        if not text:
            continue
        elements["tokens"].append(text)
        if text in sql_keywords:
            elements["keywords"].append(text)
        for fn in agg_funcs:
            if text.startswith(fn + "("):
                elements["aggregations"].append(text)

    # Naive regex extraction of the table name and the SELECT list.
    raw = parsed_sql.value
    from_match = re.search(r'FROM\s+(\w+)', raw, re.IGNORECASE)
    if from_match:
        elements["tables"].add(from_match.group(1))

    select_match = re.search(r'SELECT\s+(.*?)\s+FROM', raw, re.IGNORECASE | re.DOTALL)
    if select_match:
        picked = [c.strip() for c in select_match.group(1).split(',')]
        elements["selects"] = picked
        for item in picked:
            # Drop a table qualifier ("t.col" -> "col") when present.
            name = item.split('.')[1].strip() if '.' in item else item.strip()
            # Unwrap a leading aggregate function ("COUNT(x)" -> "x").
            for fn in agg_funcs:
                if name.upper().startswith(fn + "("):
                    name = name[len(fn):].strip('()')
                    break
            elements["columns"].add(name)

    return elements

# Weighted similarity between generated and expected SQL element dicts.
def calculate_similarity(generated: Dict[str, Any], expected: Dict[str, Any]) -> float:
    """Combine keyword/table/column Jaccard similarities into one score.

    Returns 0.5 when there is no expected SQL to compare against, and 0.0
    when nothing was generated.
    """
    if not expected:
        return 0.5  # no reference available: neutral midpoint
    if not generated:
        return 0.0  # nothing generated at all

    # Per-facet Jaccard similarities (sets are materialized as lists for
    # the shared helper).
    kw_score = calculate_set_similarity(
        generated.get("keywords", []),
        expected.get("keywords", []),
    )
    tbl_score = calculate_set_similarity(
        list(generated.get("tables", set())),
        list(expected.get("tables", set())),
    )
    col_score = calculate_set_similarity(
        list(generated.get("columns", set())),
        list(expected.get("columns", set())),
    )

    # Columns carry slightly more weight than keywords or tables.
    return kw_score * 0.3 + tbl_score * 0.3 + col_score * 0.4

# Case-insensitive Jaccard similarity of two string collections.
def calculate_set_similarity(set1: List[str], set2: List[str]) -> float:
    """Return |A ∩ B| / |A ∪ B| over upper-cased, de-duplicated inputs.

    Two empty collections are considered identical (1.0); exactly one
    empty collection yields 0.0.
    """
    left = {item.upper() for item in set1}
    right = {item.upper() for item in set2}

    if not left and not right:
        return 1.0
    if not left or not right:
        return 0.0

    return len(left & right) / len(left | right)

# Compare the clause-level structure of two SQL strings.
def compare_sql_structure(sql1: str, sql2: str) -> float:
    """Return the fraction of positionally matching major clauses.

    Only the coarse clause sequence (SELECT/FROM/WHERE/JOIN/GROUP BY/
    ORDER BY) is compared; 1.0 is returned when neither query contains
    any of these clauses.
    """
    clause_set = ("SELECT", "FROM", "WHERE", "JOIN", "GROUP BY", "ORDER BY")

    def clause_sequence(sql):
        # Ordered list of major clause keywords appearing at the top level.
        if not sql:
            return []
        return [
            t.value.strip().upper()
            for t in sqlparse.parse(sql)[0].tokens
            if t.value.strip().upper() in clause_set
        ]

    seq_a = clause_sequence(sql1)
    seq_b = clause_sequence(sql2)

    longest = max(len(seq_a), len(seq_b))
    if longest == 0:
        return 1.0
    aligned = sum(a == b for a, b in zip(seq_a, seq_b))
    return aligned / longest