from __future__ import annotations

import json
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import pandas as pd
from fastapi import UploadFile

from backend.common.config import config_manager
from backend.database.db import db_manager
from backend.excel.utils import dataframe_to_llm_markdown, load_excel_safely
from backend.llm import llm_manager
from backend.logger_setup import get_logger

# Module-level logger for this service.
logger = get_logger("excel.service")

# Storage directory for uploaded dataset files: <two levels above this
# file>/data/excel_datasets — presumably the backend package root; confirm
# against the project layout if this module is moved.
DATASET_DIR = Path(__file__).resolve().parents[1] / "data" / "excel_datasets"
# Import-time side effect: ensure the storage directory exists.
DATASET_DIR.mkdir(parents=True, exist_ok=True)


class ExcelServiceError(Exception):
    """Service-level error carrying a machine-readable code and an HTTP status.

    Attributes:
        code: Short error identifier (e.g. ``"READ_FAILED"``).
        status_code: HTTP status the API layer should respond with.
    """

    def __init__(self, code: str, message: str, status_code: int = 400):
        """Record *code* and *status_code*; *message* becomes ``str(self)``."""
        self.code = code
        self.status_code = status_code
        super().__init__(message)


def _allowed_extension(filename: str) -> bool:
    return filename.lower().endswith((".xlsx", ".xls", ".csv"))


def _read_dataframe(path: Path, sheet_name: Optional[str] = None) -> pd.DataFrame:
    """Load the spreadsheet at *path* into a DataFrame.

    Delegates to the project's ``load_excel_safely`` helper; a *sheet_name*
    of ``None`` leaves sheet selection to that helper's default.
    """
    resolved = str(path)
    return load_excel_safely(resolved, sheet_name=sheet_name)


class ExcelDatasetService:
    """Service layer for uploaded spreadsheet datasets.

    Responsibilities: storing raw files under ``DATASET_DIR``, registering
    metadata in the ``excel_datasets`` table, and serving previews, LLM-based
    Q&A, and ECharts chart options on top of the stored data.

    All methods are static; failures surface as :class:`ExcelServiceError`.
    """

    # Column order shared by every SELECT/INSERT below; keep in sync with the
    # SQL statements and with _row_to_dict().
    _COLUMNS: Tuple[str, ...] = (
        "id",
        "name",
        "filename",
        "filepath",
        "filesize",
        "sheet_name",
        "rows",
        "cols",
        "created_at",
    )

    @staticmethod
    def _placeholder() -> str:
        """Return the bind-parameter token for the active database driver."""
        return "?" if db_manager.db_type == "sqlite" else "%s"

    @staticmethod
    def _row_to_dict(row: Any) -> Dict[str, Any]:
        """Normalize a cursor row (dict-style or tuple-style) to a plain dict."""
        if isinstance(row, dict):
            return row
        return dict(zip(ExcelDatasetService._COLUMNS, row))

    @staticmethod
    def upload_dataset(file: UploadFile, file_bytes: bytes) -> Dict[str, Any]:
        """Store an uploaded file, validate it is readable, and register it.

        Args:
            file: FastAPI upload handle (only ``filename`` is read here).
            file_bytes: Complete file content, already read by the caller.

        Returns:
            Metadata dict for the newly created dataset.

        Raises:
            ExcelServiceError: ``INVALID_EXTENSION`` for unsupported suffixes,
                ``READ_FAILED`` for unreadable content, ``INSERT_FAILED`` when
                persisting metadata fails. On failure the stored file is
                removed again so no orphan remains on disk.
        """
        filename = file.filename or "dataset.xlsx"
        if not _allowed_extension(filename):
            raise ExcelServiceError("INVALID_EXTENSION", "只支持 .xlsx/.xls/.csv 文件")

        dataset_id = str(uuid.uuid4())
        storage_name = f"{dataset_id}{Path(filename).suffix.lower()}"
        storage_path = DATASET_DIR / storage_name
        storage_path.write_bytes(file_bytes)

        try:
            df = _read_dataframe(storage_path)
            rows, cols = df.shape
        except Exception as exc:
            # The file is unreadable — remove it so it does not linger on disk.
            storage_path.unlink(missing_ok=True)
            logger.error(f"读取 Excel 失败: {exc}")
            raise ExcelServiceError("READ_FAILED", f"读取 Excel 失败: {exc}") from exc

        # Naive UTC timestamp: same value the deprecated datetime.utcnow()
        # produced, so stored values remain comparable with existing rows.
        created_at = datetime.now(timezone.utc).replace(tzinfo=None)
        filesize = len(file_bytes)
        try:
            ExcelDatasetService._insert_dataset(
                dataset_id=dataset_id,
                name=filename,
                filename=storage_name,
                filepath=str(storage_path),
                filesize=filesize,
                rows=rows,
                cols=cols,
                sheet_name=None,
                created_at=created_at,
            )
        except ExcelServiceError:
            # Keep disk and DB consistent: no DB row, no file.
            storage_path.unlink(missing_ok=True)
            raise

        return {
            "id": dataset_id,
            "name": filename,
            "filename": storage_name,
            "created_at": created_at.isoformat(),
            "rows": rows,
            "cols": cols,
        }

    @staticmethod
    def list_datasets(page: int = 1, size: int = 20) -> Dict[str, Any]:
        """Return one page of datasets (newest first) plus the total count.

        Args:
            page: 1-based page number.
            size: Page size.

        Returns:
            ``{"items": [...], "total": int}``.

        Raises:
            ExcelServiceError: ``LIST_FAILED`` on any database error.
        """
        offset = (page - 1) * size
        try:
            # The placeholder token is driver-derived, never user input, so
            # interpolating it into the SQL text is safe; values stay bound.
            ph = ExcelDatasetService._placeholder()
            with db_manager.get_cursor() as cursor:
                cursor.execute(
                    f"""
                    SELECT id, name, filename, filepath, filesize, sheet_name, rows, cols, created_at
                    FROM excel_datasets
                    ORDER BY created_at DESC
                    LIMIT {ph} OFFSET {ph}
                    """,
                    (size, offset),
                )
                items = cursor.fetchall() or []

                cursor.execute("SELECT COUNT(*) FROM excel_datasets")
                total_row = cursor.fetchone()
                if isinstance(total_row, dict):
                    # Dict cursors name the COUNT column differently per driver.
                    total = (
                        total_row.get("COUNT(*)")
                        or total_row.get("count")
                        or next(iter(total_row.values()), 0)
                    )
                else:
                    total = total_row[0] if total_row else 0

            return {
                "items": [ExcelDatasetService._row_to_dict(r) for r in items],
                "total": total,
            }
        except Exception as exc:
            logger.error(f"查询数据集失败: {exc}")
            raise ExcelServiceError("LIST_FAILED", "获取数据集列表失败") from exc

    @staticmethod
    def get_dataset(dataset_id: str) -> Dict[str, Any]:
        """Fetch a single dataset's metadata row by id.

        Raises:
            ExcelServiceError: ``DATASET_NOT_FOUND`` (404) when no row exists;
                ``LOAD_FAILED`` on any other database error.
        """
        try:
            ph = ExcelDatasetService._placeholder()
            with db_manager.get_cursor() as cursor:
                cursor.execute(
                    f"""
                    SELECT id, name, filename, filepath, filesize, sheet_name, rows, cols, created_at
                    FROM excel_datasets
                    WHERE id = {ph}
                    """,
                    (dataset_id,),
                )
                row = cursor.fetchone()
            if not row:
                raise ExcelServiceError("DATASET_NOT_FOUND", "数据集不存在", status_code=404)
            return ExcelDatasetService._row_to_dict(row)
        except ExcelServiceError:
            raise
        except Exception as exc:
            logger.error(f"加载数据集失败: {exc}")
            raise ExcelServiceError("LOAD_FAILED", "加载数据集失败") from exc

    @staticmethod
    def _load_dataset_frame(dataset_id: str) -> pd.DataFrame:
        """Resolve a dataset's backing file and load it into a DataFrame.

        Raises:
            ExcelServiceError: ``FILE_MISSING`` (404) when the stored file is
                gone; ``get_dataset`` errors propagate unchanged. Reader
                errors propagate to the caller for per-endpoint wrapping.
        """
        dataset = ExcelDatasetService.get_dataset(dataset_id)
        path = Path(dataset["filepath"])
        if not path.exists():
            raise ExcelServiceError("FILE_MISSING", "数据集文件不存在", status_code=404)
        return _read_dataframe(path)

    @staticmethod
    def preview_dataset(dataset_id: str, limit: int = 50) -> Dict[str, Any]:
        """Return the first *limit* rows as ``{"columns": [...], "rows": [...]}``.

        NaN cells are rendered as empty strings so the payload is
        JSON-friendly.

        Raises:
            ExcelServiceError: ``PREVIEW_FAILED`` on read errors; not-found
                errors from :meth:`_load_dataset_frame` propagate unchanged.
        """
        try:
            df = ExcelDatasetService._load_dataset_frame(dataset_id)
            preview_df = df.head(limit).fillna("")
            return {
                "columns": [str(c) for c in preview_df.columns],
                "rows": preview_df.values.tolist(),
            }
        except ExcelServiceError:
            raise
        except Exception as exc:
            logger.error(f"预览数据集失败: {exc}")
            raise ExcelServiceError("PREVIEW_FAILED", "预览数据集失败") from exc

    @staticmethod
    def qa_dataset(dataset_id: str, question: str) -> Dict[str, Any]:
        """Answer a natural-language question about a dataset via the LLM.

        Only the first 50 rows are passed to the model (as Markdown); a note
        in the prompt tells the model when the table was truncated.

        Args:
            dataset_id: Id of a previously uploaded dataset.
            question: User question, forwarded verbatim into the prompt.

        Returns:
            ``{"answer": str}``.

        Raises:
            ExcelServiceError: ``QA_FAILED`` on read/LLM errors; not-found
                errors from :meth:`_load_dataset_frame` propagate unchanged.
        """
        try:
            df = ExcelDatasetService._load_dataset_frame(dataset_id)
            total_rows = len(df)
            max_rows = 50
            table_markdown = dataframe_to_llm_markdown(df, max_rows=max_rows)
            note = ""
            if total_rows > 5000:
                note = f"注意：表格共有 {total_rows} 行，本次仅基于前 {max_rows} 行预览，请缩小筛选范围以获得更精确的结果。"
            elif total_rows > max_rows:
                note = f"注意：本次仅使用前 {max_rows} 行数据进行分析，完整行数为 {total_rows}。"

            system_prompt = f"""
你是一个专业的数据分析助手，专门根据 Excel 表格数据回答问题。

我会给你一张表格的部分数据（以 Markdown 表格形式展示），请你严格按照以下要求回答：
1. 先确认表头（每一列的含义），再进行分析。
2. 只能根据表格中已有的数据进行推断，不要编造不存在的值。
3. 优先用中文回答，并解释你的计算过程。
4. 如果用户的问题无法从表格中得到答案，请明确说明：“无法从当前表格数据中得出结论”。

下面是表格内容（最多前 {max_rows} 行）：
---
{table_markdown}
---
{note}
""".strip()

            user_prompt = f"用户的问题是：{question}"
            provider = llm_manager.get_provider(config_manager.llm_providers.active_provider)
            answer = provider.generate(f"{system_prompt}\n\n{user_prompt}")
            return {"answer": answer}
        except ExcelServiceError:
            raise
        except Exception as exc:
            logger.error(f"Excel 问答失败: {exc}")
            raise ExcelServiceError("QA_FAILED", "生成回答失败") from exc

    @staticmethod
    def chart_dataset(
        dataset_id: str,
        chart_type: str,
        x_field: str,
        y_field: str,
        group_field: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Build an ECharts ``option`` dict from a dataset.

        Args:
            dataset_id: Id of a previously uploaded dataset.
            chart_type: One of ``line`` / ``bar`` / ``pie`` (case-insensitive).
            x_field: Category (x-axis) column; also the pie label column when
                ``group_field`` is not given.
            y_field: Numeric value column, summed per category/group.
            group_field: Optional column splitting line/bar data into one
                series per group, or supplying the pie labels.

        Returns:
            ``{"option": <ECharts option dict>}``.

        Raises:
            ExcelServiceError: ``INVALID_CHART_TYPE``, ``FIELD_NOT_FOUND``,
                or ``CHART_FAILED``; not-found errors from
                :meth:`_load_dataset_frame` propagate unchanged.
        """
        try:
            df = ExcelDatasetService._load_dataset_frame(dataset_id)
            chart_type = (chart_type or "").lower()
            if chart_type not in {"line", "bar", "pie"}:
                raise ExcelServiceError("INVALID_CHART_TYPE", "chart_type 仅支持 line/bar/pie")

            if x_field not in df.columns or y_field not in df.columns:
                raise ExcelServiceError("FIELD_NOT_FOUND", "字段不存在")

            if chart_type == "pie":
                option = ExcelDatasetService._pie_option(df, x_field, y_field, group_field)
            else:
                option = ExcelDatasetService._axis_option(df, chart_type, x_field, y_field, group_field)
            return {"option": option}
        except ExcelServiceError:
            raise
        except Exception as exc:
            logger.error(f"生成图表失败: {exc}")
            raise ExcelServiceError("CHART_FAILED", "生成图表失败") from exc

    @staticmethod
    def _pie_option(
        df: pd.DataFrame,
        x_field: str,
        y_field: str,
        group_field: Optional[str],
    ) -> Dict[str, Any]:
        """Sum *y_field* per label column and build a pie-chart option."""
        label_field = group_field or x_field
        if label_field not in df.columns:
            raise ExcelServiceError("FIELD_NOT_FOUND", "分组字段不存在")
        grouped = df.groupby(label_field)[y_field].sum().reset_index()
        return {
            "tooltip": {"trigger": "item"},
            "series": [
                {
                    "type": "pie",
                    "radius": "60%",
                    "data": [
                        {"name": str(row[label_field]), "value": float(row[y_field])}
                        for _, row in grouped.iterrows()
                    ],
                }
            ],
        }

    @staticmethod
    def _axis_option(
        df: pd.DataFrame,
        chart_type: str,
        x_field: str,
        y_field: str,
        group_field: Optional[str],
    ) -> Dict[str, Any]:
        """Build a line/bar option, optionally one series per group value."""
        df = df.dropna(subset=[x_field, y_field])
        series_type = "line" if chart_type == "line" else "bar"
        if group_field and group_field in df.columns:
            grouped = df.groupby([x_field, group_field])[y_field].sum().reset_index()
            categories = [str(v) for v in grouped[x_field].unique().tolist()]
            series_list = []
            for grp in grouped[group_field].unique():
                subset = grouped[grouped[group_field] == grp]
                # Map x -> value so every series covers all categories,
                # filling missing combinations with 0.
                data_map = {str(row[x_field]): float(row[y_field]) for _, row in subset.iterrows()}
                series_list.append(
                    {
                        "name": str(grp),
                        "type": series_type,
                        "data": [data_map.get(cat, 0) for cat in categories],
                    }
                )
            return {
                "tooltip": {"trigger": "axis"},
                "legend": {},
                "xAxis": {"type": "category", "data": categories},
                "yAxis": {"type": "value"},
                "series": series_list,
            }

        grouped = df.groupby(x_field)[y_field].sum().reset_index()
        categories = [str(v) for v in grouped[x_field].tolist()]
        return {
            "tooltip": {"trigger": "axis"},
            "xAxis": {"type": "category", "data": categories},
            "yAxis": {"type": "value"},
            "series": [
                {
                    "type": series_type,
                    "data": [float(v) for v in grouped[y_field].tolist()],
                    "name": str(y_field),
                }
            ],
        }

    @staticmethod
    def _insert_dataset(
        dataset_id: str,
        name: str,
        filename: str,
        filepath: str,
        filesize: int,
        rows: int,
        cols: int,
        sheet_name: Optional[str],
        created_at: datetime,
    ) -> None:
        """Insert one metadata row into ``excel_datasets``.

        Raises:
            ExcelServiceError: ``INSERT_FAILED`` on any database error.
        """
        params: Tuple[Any, ...] = (
            dataset_id,
            name,
            filename,
            filepath,
            filesize,
            sheet_name,
            rows,
            cols,
            created_at,
        )
        try:
            ph = ExcelDatasetService._placeholder()
            placeholders = ", ".join([ph] * len(params))
            with db_manager.get_cursor() as cursor:
                cursor.execute(
                    f"""
                    INSERT INTO excel_datasets
                    (id, name, filename, filepath, filesize, sheet_name, rows, cols, created_at)
                    VALUES ({placeholders})
                    """,
                    params,
                )
        except Exception as exc:
            logger.error(f"保存数据集失败: {exc}")
            raise ExcelServiceError("INSERT_FAILED", "保存数据集失败") from exc
