from __future__ import annotations

from typing import Any, Dict, Optional
from decimal import Decimal
from datetime import date, datetime
from fastapi import APIRouter, Depends, HTTPException, Request
from pydantic import BaseModel

from ..dependencies import analysis_service_dependency, orchestrator_dependency
from ...services.analysis_service import AnalysisService
from ...core.database import DatabaseUtils
from ...core.config import settings
from ...agents.multi_agent_orchestrator import MultiAgentOrchestrator
from fastapi.responses import StreamingResponse
import json
from ...agents.column_title_agent import ColumnTitleAgent


# Router for the analysis endpoints; mounted by the parent FastAPI app.
router = APIRouter()
def _to_json_safe(obj: Any) -> Any:
    if isinstance(obj, (Decimal, date, datetime)):
        return str(obj)
    if isinstance(obj, dict):
        return {k: _to_json_safe(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [_to_json_safe(i) for i in obj]
    if isinstance(obj, tuple):
        return tuple(_to_json_safe(i) for i in obj)
    return obj


class AnalysisRequest(BaseModel):
    """Request body shared by the /query and /stream analysis endpoints."""

    query: str  # Natural-language analysis question.
    analysis_type: Optional[str] = "auto"  # Requested analysis mode; currently unused by the handlers below.
    options: Optional[Dict[str, Any]] = None  # Extra, service-specific options; currently unused by the handlers below.


@router.post("/query")
async def analyze_query(req: AnalysisRequest, analysis_service: AnalysisService = Depends(analysis_service_dependency)):
    """Run a one-shot analysis for a natural-language query.

    Returns:
        Envelope ``{"success", "message", "data"}``; the result is passed
        through ``_to_json_safe`` so Decimal/date/datetime serialize cleanly.

    Raises:
        HTTPException: re-raised unchanged if the service raised one;
            otherwise a 500 carrying the underlying error message.
    """
    try:
        result = await analysis_service.analyze_query(query=req.query)
        # Ensure the payload is JSON-serializable (Decimal/date/datetime -> str).
        result = _to_json_safe(result)
        return {"success": True, "message": "分析完成", "data": result}
    except HTTPException:
        # Don't wrap deliberate HTTP errors (e.g. 4xx) into a generic 500.
        raise
    except Exception as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(status_code=500, detail=f"分析失败: {e}") from e

def _sse_event(event: str, data: Dict[str, Any]) -> str:
    # 兼容 Decimal/日期 等不可直接序列化的类型
    return f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False, default=str)}\n\n"

@router.post("/stream")
async def analyze_stream(
    req: AnalysisRequest,
    analysis_service: AnalysisService = Depends(analysis_service_dependency),
    orchestrator: MultiAgentOrchestrator = Depends(orchestrator_dependency),
    request: Request = None,
):
    """Stream an end-to-end analysis as Server-Sent Events.

    Pipeline: intent extraction -> schema/sample context -> join-key
    inference -> SQL generation -> execution with retry on error or empty
    result -> column-title enrichment. Progress and results are emitted as
    SSE events named "intent", "context", "sql", "exec", "final", "error",
    and a closing "end".

    NOTE(review): `analysis_service` is injected but never used inside the
    generator — confirm whether the dependency is still needed.
    """
    async def generator():
        try:
            yield _sse_event("intent", {"message": "正在分析查询意图"})
            # Run intent extraction (provided by MultiAgentOrchestrator) and
            # push the parsed result back to the client.
            try:
                parsed_intent = orchestrator.extract_intent(req.query or "")
            except Exception:
                parsed_intent = {"metrics_keywords": [], "dimensions_keywords": [], "filter_values": [], "time_range": ""}
            yield _sse_event("intent", {"parsed": parsed_intent})
            # 1) Build the table-schema context.
            yield _sse_event("intent", {"message": "获取数据库表信息"})
            tables = DatabaseUtils.get_target_tables()
            schema_map = {}
            table_samples: Dict[str, list[Dict[str, Any]]] = {}
            # Cap at 20 tables to bound prompt size and sampling cost.
            for t in tables[:20]:
                schema = DatabaseUtils.get_target_table_schema(t)
                cols = []
                # The schema helper may return several dict shapes; probe the
                # known column-name keys for each.
                if isinstance(schema, dict):
                    if "columns" in schema and isinstance(schema["columns"], list):
                        for col in schema["columns"]:
                            if isinstance(col, dict):
                                name = col.get("column_name") or col.get("COLUMN_NAME") or col.get("name") or col.get("Field")
                                if name:
                                    cols.append(str(name))
                    elif isinstance(schema.get("column_details"), list):
                        for col in schema["column_details"]:
                            name = col.get("Field") or col.get("column_name")
                            if name:
                                cols.append(str(name))
                schema_map[t] = cols
                # Best-effort row sampling; empty list on failure.
                try:
                    table_samples[t] = DatabaseUtils.get_table_samples(t)
                except Exception:
                    table_samples[t] = []
            yield _sse_event("context", {"tables": list(schema_map.keys())})

            # Infer candidate join keys from column names plus overlapping
            # sample values, to help the LLM generate more reliable JOINs.
            inferred_relationships: list[dict] = []
            try:
                def _sample_values(tab: str, col: str) -> set[str]:
                    # Distinct stringified values of `col` in the first 50 sample rows.
                    vals: set[str] = set()
                    for row in table_samples.get(tab, [])[:50]:
                        v = row.get(col)
                        if v is not None:
                            vals.add(str(v))
                    return vals
                table_list = list(schema_map.keys())
                for i in range(len(table_list)):
                    for j in range(i + 1, len(table_list)):
                        t1, t2 = table_list[i], table_list[j]
                        # Column pairs capped at 64x64 per table pair.
                        for c1 in schema_map.get(t1, [])[:64]:
                            for c2 in schema_map.get(t2, [])[:64]:
                                same_suffix = (c1 == c2) or (c1.endswith("_id") and c2.endswith("_id")) or (c1.endswith("_key") and c2.endswith("_key"))
                                if not same_suffix:
                                    continue
                                inter = _sample_values(t1, c1) & _sample_values(t2, c2)
                                # Score: +1 for identical names, +1 for shared sample values.
                                score = (1 if c1 == c2 else 0) + (1 if len(inter) > 0 else 0)
                                if score > 0:
                                    inferred_relationships.append({"left_table": t1, "left_col": c1, "right_table": t2, "right_col": c2, "score": score})
            except Exception:
                pass

            # 2) Generate SQL.
            yield _sse_event("sql", {"message": "正在生成SQL"})
            db_type = (settings.target_db_type or "mysql").lower()
            # Inject the candidate join keys into the prompt to lower the
            # chance of incorrect JOINs.
            rel_text = ""
            if inferred_relationships:
                rel_pairs = ", ".join([f"{r['left_table']}.{r['left_col']} = {r['right_table']}.{r['right_col']}" for r in inferred_relationships[:50]])
                rel_text = f"\n候选关联键(可优先使用): {rel_pairs}"
            sql = orchestrator.generate_sql(req.query + rel_text, db_type=db_type, table_schemas=schema_map, table_samples=table_samples)
            # Relax any LIMIT clause so the user receives more data.
            try:
                low = sql.lower()
                if low.strip().startswith("select") and " limit " not in low:
                    # No LIMIT present: leave the query returning all rows.
                    pass
                else:
                    # A LIMIT exists (or the statement is not a plain SELECT):
                    # replace the first LIMIT with a very large value.
                    import re as _re_limit
                    def _repl(m):
                        return f"LIMIT 1000000"  # effectively unlimited
                    sql = _re_limit.sub(r"(?i)limit\s+\d+", _repl, sql, count=1)
            except Exception:
                pass
            # Convert to the final SQL executable on the target database.
            final_sql = sql
            if db_type == "postgresql":
                # MySQL-style backtick quoting -> standard double quotes.
                final_sql = (sql or "").replace("`", '"')
            yield _sse_event("sql", {"sql": final_sql})

            # 3) Execute the SQL, retrying on error or empty result.
            yield _sse_event("exec", {"message": "正在执行SQL"})
            max_retries = 2
            retry_count = 0
            data = []
            row_count = 0
            
            while retry_count <= max_retries:
                try:
                    res = DatabaseUtils.execute_target_query(final_sql)
                    if hasattr(res, "fetchall"):
                        data = [dict(r._mapping) for r in res.fetchall()]
                        row_count = len(data)
                    else:
                        # Non-SELECT statements: report affected rows instead.
                        data = {"affected_rows": getattr(res, "rowcount", 0)}
                        row_count = getattr(res, "rowcount", 0)
                    
                    # Stop as soon as we have a non-empty result.
                    if row_count > 0 or (isinstance(data, dict) and data.get("affected_rows", 0) > 0):
                        break
                    
                    # Empty result with retries left: regenerate the SQL.
                    if retry_count < max_retries:
                        yield _sse_event("exec", {"message": f"SQL执行结果为空，正在重新生成SQL (第{retry_count + 1}次重试)"})
                        # Re-analyze the schema, excluding possibly invalid columns.
                        sql = orchestrator.generate_sql_with_retry(
                            req.query, 
                            db_type=db_type, 
                            table_schemas=schema_map, 
                            table_samples=table_samples,
                            failed_sql=sql,
                            retry_count=retry_count
                        )
                        final_sql = sql
                        if db_type == "postgresql":
                            final_sql = (sql or "").replace("`", '"')
                        yield _sse_event("sql", {"sql": f"重试SQL: {final_sql}"})
                        retry_count += 1
                        continue
                    else:
                        break
                        
                except Exception as e:
                    if retry_count < max_retries:
                        yield _sse_event("exec", {"message": f"SQL执行失败，正在重新生成SQL (第{retry_count + 1}次重试)"})
                        # Regenerate the SQL, excluding columns that caused the error.
                        sql = orchestrator.generate_sql_with_retry(
                            req.query, 
                            db_type=db_type, 
                            table_schemas=schema_map, 
                            table_samples=table_samples,
                            failed_sql=sql,
                            retry_count=retry_count
                        )
                        final_sql = sql
                        if db_type == "postgresql":
                            final_sql = (sql or "").replace("`", '"')
                        yield _sse_event("sql", {"sql": f"重试SQL: {final_sql}"})
                        retry_count += 1
                        continue
                    else:
                        raise e
            
            yield _sse_event("exec", {"row_count": row_count})
            # Assemble column comments: coarse-match against DB column
            # comments first, then call a dedicated agent to generate Chinese
            # titles for aggregate columns.
            columns_meta = []
            try:
                comments_map = DatabaseUtils.get_column_comments_map()
                # Debug: log the structure of comments_map.
                import logging
                logger = logging.getLogger(__name__)
                logger.info("comments_map structure: %s", {k: list(v.keys())[:3] for k, v in comments_map.items()})
                # Debug: inspect the full column list of fact_orders.
                if 'fact_orders' in comments_map:
                    logger.info("fact_orders columns: %s", list(comments_map['fact_orders'].keys()))
                # Debug: inspect the full column list of fact_promotions.
                if 'fact_promotions' in comments_map:
                    logger.info("fact_promotions columns: %s", list(comments_map['fact_promotions'].keys()))
            except Exception:
                # NOTE(review): if this branch runs, `logger` is never bound,
                # so the later logger.info calls raise NameError (silently
                # swallowed by the surrounding try below) — consider moving
                # the logger to module level.
                comments_map = {}
            sample_row = data[0] if isinstance(data, list) and data else {}
            header_cols = list(sample_row.keys()) if isinstance(sample_row, dict) else []
            if isinstance(sample_row, dict):
                for col_name in header_cols:
                    comment = ""
                    # First table whose comment map mentions the column wins.
                    for tbl, cmap in comments_map.items():
                        if col_name in cmap and cmap[col_name]:
                            comment = cmap[col_name]
                            break
                    if not comment and "." in col_name:
                        pure = col_name.split(".")[-1]
                        for tbl, cmap in comments_map.items():
                            if pure in cmap and cmap[pure]:
                                comment = cmap[pure]
                                break
                    columns_meta.append({"name": col_name, "comment": comment})

            # Extract alias -> expression pairs and have ColumnTitleAgent
            # produce Chinese titles for aggregate columns.
            try:
                alias_source: Dict[str, str] = {}
                import re as _re
                msel = _re.search(r"select\s+([\s\S]*?)\s+from\s", final_sql, _re.IGNORECASE)
                if msel:
                    select_part = msel.group(1)
                    for m in _re.finditer(r"([^,]+?)\s+(?:as\s+)?(\"?[A-Za-z_][A-Za-z0-9_]*\"?)\s*(?:,|$)", select_part, _re.IGNORECASE):
                        expr = m.group(1).strip()
                        alias = m.group(2).strip().strip('"')
                        if alias:
                            alias_source[alias] = expr
                # Trim the comment map; match both "table.column" and bare "column".
                referenced_cols = set()
                referenced_table_cols = set()
                for expr in alias_source.values():
                    # Extract the columns referenced inside aggregate functions.
                    import re as _re_extract
                    # Matches e.g. fact_orders.total_amount inside SUM(fact_orders.total_amount).
                    func_matches = _re_extract.findall(r'\b(?:sum|count|avg|min|max)\s*\(([^)]+)\)', expr, _re_extract.IGNORECASE)
                    for match in func_matches:
                        # Strip quotes and whitespace.
                        clean_match = match.strip().strip('"').strip("'")
                        if '.' in clean_match:
                            table_col = clean_match  # e.g. fact_orders.total_amount
                            col_only = clean_match.split('.')[-1]  # e.g. total_amount
                            referenced_table_cols.add(table_col)
                            referenced_cols.add(col_only)
                        else:
                            referenced_cols.add(clean_match)
                    
                    # Also handle quoted "table"."column" forms (e.g. "fact_promotions"."promotion_fact_key").
                    quoted_matches = _re_extract.findall(r'"([^"]+)"\s*\.\s*"([^"]+)"', expr)
                    for table_name, col_name in quoted_matches:
                        table_col = f"{table_name}.{col_name}"
                        referenced_table_cols.add(table_col)
                        referenced_cols.add(col_name)
                    
                    # Also handle columns outside function calls (e.g. dim_stores.store_name).
                    for part in (expr or '').split():
                        # Strip quotes, backticks, and trailing commas.
                        clean_part = part.strip().strip('"').strip("'").strip('`').rstrip(',')
                        if '.' in clean_part and clean_part not in referenced_table_cols:
                            table_col = clean_part  # e.g. dim_stores.store_name
                            col_only = clean_part.split('.')[-1]  # e.g. store_name
                            referenced_table_cols.add(table_col)
                            referenced_cols.add(col_only)
                        elif clean_part and not clean_part.lower() in ['sum', 'count', 'avg', 'min', 'max', 'as', 'from', 'where', 'group', 'by', 'order', 'limit', 'inner', 'join', 'on'] and clean_part not in referenced_cols:
                            referenced_cols.add(clean_part)
                
                # Debug: log the matching process.
                logger.info("referenced_table_cols: %s", referenced_table_cols)
                logger.info("referenced_cols: %s", referenced_cols)
                
                brief_comments = {}
                for _t, _cmap in comments_map.items():
                    for _c, _v in _cmap.items():
                        # Prefer matching table.column, then bare column.
                        table_col_key = f"{_t}.{_c}"
                        if table_col_key in referenced_table_cols and _v:
                            brief_comments[table_col_key] = _v
                            logger.info("Matched table_col_key: %s -> %s", table_col_key, _v)
                        elif _c in referenced_cols and _v and _c not in brief_comments:
                            brief_comments[_c] = _v
                            logger.info("Matched col_only: %s -> %s", _c, _v)
                title_agent = ColumnTitleAgent()
                mapping = title_agent.generate_titles(
                    query=req.query,
                    final_sql=final_sql,
                    alias_expr=alias_source,
                    brief_comments=brief_comments,
                    col_names=header_cols,
                )
                if isinstance(mapping, dict):
                    # Only fill titles for columns that still lack a comment.
                    for c in columns_meta:
                        if not (c.get("comment") or "").strip():
                            title = mapping.get(c["name"]) or mapping.get(c["name"].split(".")[-1])
                            if title:
                                c["comment"] = str(title)
            except Exception:
                pass
            yield _sse_event("final", {"message": "完成", "data": {"sql": final_sql, "data": data, "row_count": row_count, "columns": columns_meta}})
        except Exception as e:
            yield _sse_event("error", {"message": str(e)})
        finally:
            # If the client has already disconnected, finish early; otherwise
            # emit a unified "end" event so the frontend can close explicitly.
            try:
                if request is not None and await request.is_disconnected():
                    return
            except Exception:
                pass
            yield _sse_event("end", {"ok": True})
    sse_headers = {
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        # Disable proxy buffering (e.g. nginx) so events flush immediately.
        "X-Accel-Buffering": "no",
    }
    return StreamingResponse(
        generator(),
        media_type="text/event-stream; charset=utf-8",
        headers=sse_headers,
    )

