"""
实验结果管理模块路由
"""
import os
import json
import zipfile
import shutil
import tempfile
import logging
from pathlib import Path
from typing import Dict, Any, Optional
from datetime import datetime, timezone, timedelta
import time
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, BackgroundTasks
from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
from pydantic import BaseModel, Field

from ..db import get_db
from ..db.parameters.crud import get_project_by_name_en
from ..db.experiments import crud as experiment_crud
from ..config import EXPERIMENT_RESULTS_ROOT
from ..schemas import MessageResponse
from ..io_v2.enums import IOFormat
from ..io_v2.converter import FormatConverter

logger = logging.getLogger(__name__)

try:
    import pandas as pd
    HAS_PANDAS = True
except ImportError:
    HAS_PANDAS = False
    logger.warning("未安装pandas，无法读取Parquet文件")

try:
    from openpyxl import Workbook
    from openpyxl.styles import Alignment, Border, Font, PatternFill, Side
    from openpyxl.utils import get_column_letter
    HAS_OPENPYXL = True
except ImportError:
    HAS_OPENPYXL = False
    logger.warning("未安装openpyxl，无法生成Excel文件")

router = APIRouter(prefix="/api/projects", tags=["实验结果"])


class ExperimentUpdate(BaseModel):
    """Request model for updating an experiment's basic information.

    Both fields are optional; the update route applies only the fields the
    client explicitly supplied (it dumps this model with exclude_unset).
    """
    # Optional new display name, capped at 200 characters.
    name: Optional[str] = Field(None, max_length=200, description="实验名称")
    # Optional new free-form description.
    description: Optional[str] = Field(None, description="实验描述")


def get_experiment_storage_path(project_name_en: str, experiment_run_id: int) -> Path:
    """Build the on-disk storage directory for one experiment run.

    Layout: <EXPERIMENT_RESULTS_ROOT>/<project_name_en>/<experiment_run_id>.
    """
    return Path(EXPERIMENT_RESULTS_ROOT, project_name_en, str(experiment_run_id))


def format_datetime_for_json(dt):
    """Serialize a datetime-like value to an ISO-8601 string for JSON output.

    Args:
        dt: A datetime (naive or aware), a pre-formatted string, None/falsy,
            or any other value.

    Returns:
        None for falsy input, the string itself for str input, str(dt) for
        non-datetime values, otherwise an ISO-8601 string.  Naive datetimes
        are assumed to be local time and get the local UTC offset attached.
    """
    if not dt:
        return None
    if isinstance(dt, str):
        return dt
    if not isinstance(dt, datetime):
        return str(dt)  # not a datetime at all — fall back to str()
    if dt.tzinfo is None:
        # astimezone() interprets a naive datetime as local time and attaches
        # the correct local offset for dt's date, including DST.  The previous
        # `-time.timezone if time.daylight == 0 else -time.altzone` check
        # picked a single fixed offset based on whether DST is *defined* for
        # the zone, not whether it is *in effect*, yielding a wrong offset
        # for half the year in DST zones.
        return dt.astimezone().isoformat()
    return dt.isoformat()


@router.post("/{project_name_en}/experiments/upload")
async def upload_experiment(
    project_name_en: str,
    file: UploadFile = File(...),
    db: Session = Depends(get_db)
):
    """
    Upload experiment results.

    Accepts a ZIP file uploaded by the client (containing a restore directory
    and metadata.json), extracts it to the server storage location, and writes
    the metadata into the database.

    Expected ZIP layout:
    - restore/          # restore directory (mirrors the server structure)
      - snapshot.*      # parameter snapshot file (may be .json, .xlsx, ...)
      - files/          # result files
      - series/         # Parquet files with series data
    - metadata.json     # experiment metadata (required, at the ZIP root)

    Args:
        project_name_en: Project English name.
        file: ZIP file (containing the restore directory and metadata.json).
        db: Database session.

    Returns:
        Response dict containing the new experiment_id.

    Raises:
        HTTPException: 404 if the project does not exist, 400 for a malformed
            upload, 500 for any unexpected failure.
    """
    # 1. Ensure the target project exists.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    logger.info(f"接收到上传请求，项目: {project_name_en}, 文件名: {file.filename}")

    # 2. Reject requests that carry no filename.
    if not file.filename:
        raise HTTPException(
            status_code=400,
            detail="未提供文件名"
        )

    # 3. Create a temporary working directory for extraction.
    temp_dir = tempfile.mkdtemp()

    try:
        # 4. Persist the uploaded ZIP into the temp directory.
        zip_path = os.path.join(temp_dir, file.filename or "upload.zip")
        with open(zip_path, "wb") as f:
            content = await file.read()
            f.write(content)

        # 5. Extract the ZIP archive.
        extract_dir = os.path.join(temp_dir, "extracted")
        os.makedirs(extract_dir, exist_ok=True)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(extract_dir)

        # 6. Resolve the client-side layout: extract_dir/restore/ and
        # extract_dir/metadata.json.
        restore_dir = os.path.join(extract_dir, "restore")
        metadata_file_path = os.path.join(extract_dir, "metadata.json")

        if not os.path.exists(restore_dir):
            raise HTTPException(
                status_code=400,
                detail="ZIP文件中未找到restore目录"
            )

        # metadata.json is mandatory at the ZIP root.
        if not os.path.exists(metadata_file_path):
            raise HTTPException(
                status_code=400,
                detail="ZIP文件中未找到metadata.json，请确保ZIP文件根目录包含metadata.json"
            )

        with open(metadata_file_path, 'r', encoding='utf-8') as f:
            metadata_dict = json.load(f)
        logger.info("从ZIP文件中读取metadata.json成功")

        # Ensure the project name recorded in the metadata matches the URL.
        exp_project_name = metadata_dict.get("experiment", {}).get("project_name_en")
        if exp_project_name != project_name_en:
            raise HTTPException(
                status_code=400,
                detail=f"metadata中的项目名称 '{exp_project_name}' 与URL中的项目名称 '{project_name_en}' 不匹配"
            )

        # 7. Create the experiment-run record.
        experiment_info = metadata_dict.get("experiment", {})
        # Use created_at from the metadata when present; otherwise the CRUD
        # layer falls back to its default (current time).
        created_at = experiment_info.get("created_at")
        db_experiment = experiment_crud.create_experiment_run(
            db=db,
            project_id=project.id,
            name=experiment_info.get("name"),
            description=experiment_info.get("description"),
            snapshot_file_path=None,  # filled in below once the snapshot file is found
            created_at=created_at  # use if provided, else default (current time)
        )

        # 8. Create the on-disk storage directory for this experiment.
        storage_path = get_experiment_storage_path(project_name_en, db_experiment.id)
        storage_path.mkdir(parents=True, exist_ok=True)

        # 9. Copy the restore directory's contents straight into storage (all
        # paths inside the metadata are relative to restore/).
        # restore layout: snapshot.* (may be .json/.xlsx/...), files/, series/
        # Copying the whole tree keeps the structure identical to the client's.
        snapshot_filename = None
        for item in os.listdir(restore_dir):
            src = os.path.join(restore_dir, item)
            dst = storage_path / item
            if os.path.isdir(src):
                if dst.exists():
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
                # Treat any top-level file named "snapshot*" as the snapshot.
                if item.startswith("snapshot"):
                    snapshot_filename = item

        # Record the snapshot file path (if one was found).
        if snapshot_filename:
            relative_snapshot_path = f"{project_name_en}/{db_experiment.id}/{snapshot_filename}"
            db_experiment.snapshot_file_path = relative_snapshot_path
            db.commit()
            db.refresh(db_experiment)

        # 10. Create the category records (reusing existing ones by name).
        category_map: Dict[str, int] = {}  # category_name -> category_id
        categories_data = metadata_dict.get("categories", [])
        for cat_data in categories_data:
            category_name = cat_data.get("name")
            if not category_name:
                continue

            db_category = experiment_crud.get_experiment_data_category_by_name(
                db, db_experiment.id, category_name
            )
            if not db_category:
                db_category = experiment_crud.create_experiment_data_category(
                    db=db,
                    experiment_run_id=db_experiment.id,
                    name=category_name,
                    description=cat_data.get("description"),
                    sort_order=cat_data.get("sort_order", 0)
                )
            category_map[category_name] = db_category.id

        # 11. Create result-file records.
        results_data = metadata_dict.get("results", [])
        for result_data in results_data:
            # file_path in the metadata is relative to restore/; prefix it
            # with the server-side location.
            file_path = result_data.get("file_path", "")
            relative_file_path = f"{project_name_en}/{db_experiment.id}/{file_path}"
            experiment_crud.create_experiment_result(
                db=db,
                experiment_run_id=db_experiment.id,
                name=result_data.get("name", ""),
                file_type=result_data.get("file_type", "other"),
                file_path=relative_file_path,
                file_size=result_data.get("file_size"),
                description=result_data.get("description")
            )

        # 12. Create series-data records.
        series_data_list = metadata_dict.get("data_series", [])
        for series_data in series_data_list:
            category_name = series_data.get("category_name", "")
            category_id = category_map.get(category_name)
            if not category_id:
                continue  # skip entries without a known category

            # parquet_path in the metadata is relative to restore/; prefix it
            # with the server-side location.
            parquet_path = series_data.get("parquet_path", "")
            relative_parquet_path = f"{project_name_en}/{db_experiment.id}/{parquet_path}"

            experiment_crud.create_experiment_data_series(
                db=db,
                experiment_run_id=db_experiment.id,
                category_id=category_id,
                name=series_data.get("name", ""),
                parquet_path=relative_parquet_path,
                data_length=series_data.get("data_length", 0),
                data_group=series_data.get("data_group"),
                unit=series_data.get("unit"),
                description=series_data.get("description"),
                index_column_name=series_data.get("index_column_name", "index")
            )

        # 13. Create scalar-data records.
        scalar_data_list = metadata_dict.get("data_scalar", [])
        for scalar_data in scalar_data_list:
            category_name = scalar_data.get("category_name", "")
            category_id = category_map.get(category_name)
            if not category_id:
                continue  # skip entries without a known category

            # The value column is stored as a JSON string.
            value = scalar_data.get("value")
            value_str = json.dumps(value, ensure_ascii=False) if value is not None else ""

            experiment_crud.create_experiment_data_scalar(
                db=db,
                experiment_run_id=db_experiment.id,
                category_id=category_id,
                name=scalar_data.get("name", ""),
                value=value_str,
                data_group=scalar_data.get("data_group"),
                unit=scalar_data.get("unit"),
                description=scalar_data.get("description")
            )

        # The database stores local times; attach the local offset so the
        # serialized timestamp is unambiguous.
        created_at_str = None
        if db_experiment.created_at:
            # Naive datetime (local time): attach the local timezone so it
            # renders with an explicit offset.
            if db_experiment.created_at.tzinfo is None:
                local_tz_offset = -time.timezone if time.daylight == 0 else -time.altzone
                local_tz = timezone(timedelta(seconds=local_tz_offset))
                created_at_with_tz = db_experiment.created_at.replace(tzinfo=local_tz)
                created_at_str = created_at_with_tz.isoformat()
            else:
                created_at_str = db_experiment.created_at.isoformat()

        return {
            "success": True,
            "message": f"实验 '{experiment_info.get('name', 'N/A')}' 上传成功",
            "experiment_id": db_experiment.id,
            "experiment_name": db_experiment.name or experiment_info.get('name', 'N/A'),
            "project_name_en": project_name_en,
            "created_at": created_at_str
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"上传实验失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"上传实验失败: {str(e)}"
        )
    finally:
        # Always clean up the temporary directory, success or failure.
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir, ignore_errors=True)


@router.get("/{project_name_en}/experiments")
async def get_experiment_list(
    project_name_en: str,
    skip: int = 0,
    limit: int = 100,
    db: Session = Depends(get_db)
):
    """List the experiment-run records of a project.

    Args:
        project_name_en: Project English name.
        skip: Number of records to skip (pagination offset).
        limit: Maximum number of records to return.
        db: Database session.

    Returns:
        A list of serialized experiment-run records.

    Raises:
        HTTPException: 404 if the project does not exist.
    """
    # The project must exist before we query its experiments.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    experiments = experiment_crud.get_experiment_runs_by_project_name_en(
        db, project_name_en, skip=skip, limit=limit
    )

    # Serialize each record, normalizing the timestamp for JSON.
    return [
        {
            "id": run.id,
            "project_id": run.project_id,
            "name": run.name,
            "description": run.description,
            "snapshot_file_path": run.snapshot_file_path,
            "created_at": format_datetime_for_json(run.created_at),
        }
        for run in experiments
    ]


@router.put("/{project_name_en}/experiments/{experiment_id}")
async def update_experiment_basic_info(
    project_name_en: str,
    experiment_id: int,
    payload: ExperimentUpdate,
    db: Session = Depends(get_db)
):
    """Update an experiment's basic information (name and description).

    Raises:
        HTTPException: 404 if the project or experiment is missing, 400 when
            the experiment belongs to another project or no field was given,
            500 when the update itself fails.
    """
    # Validate the project.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    # Validate the experiment.
    experiment = experiment_crud.get_experiment_run(db, experiment_id)
    if not experiment:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    # The experiment must belong to the project named in the URL.
    if experiment.project_id != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    # Pydantic v2 exposes model_dump(); fall back to v1's dict().
    if hasattr(payload, "model_dump"):
        update_data = payload.model_dump(exclude_unset=True)
    else:
        update_data = payload.dict(exclude_unset=True)

    # At least one field must have been supplied by the client.
    if not update_data:
        raise HTTPException(
            status_code=400,
            detail="请至少提供实验名称或描述"
        )

    updated_experiment = experiment_crud.update_experiment_run(
        db,
        experiment_id,
        name=update_data.get("name"),
        description=update_data.get("description")
    )
    if not updated_experiment:
        raise HTTPException(
            status_code=500,
            detail="更新实验失败"
        )

    return {
        "id": updated_experiment.id,
        "project_id": updated_experiment.project_id,
        "name": updated_experiment.name,
        "description": updated_experiment.description,
        "snapshot_file_path": updated_experiment.snapshot_file_path,
        "created_at": format_datetime_for_json(updated_experiment.created_at),
    }


@router.delete("/{project_name_en}/experiments/{experiment_id}")
async def delete_experiment(
    project_name_en: str,
    experiment_id: int,
    db: Session = Depends(get_db)
):
    """Delete an experiment-run record together with all associated data.

    Removes the experiment's on-disk storage directory (best effort) and then
    deletes the database record; related rows are removed by cascade.

    Args:
        project_name_en: Project English name.
        experiment_id: Experiment run ID.
        db: Database session.

    Returns:
        A success message.

    Raises:
        HTTPException: 404 if the project or experiment does not exist,
            400 if the experiment belongs to a different project,
            500 if the database deletion fails.
    """
    # Validate the project.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    # Validate the experiment.
    experiment = experiment_crud.get_experiment_run(db, experiment_id)
    if not experiment:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    # The experiment must belong to the project named in the URL.
    if experiment.project_id != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    # Best-effort removal of the experiment's storage directory.
    # FIX: the previous code passed ignore_errors=True, which suppressed all
    # rmtree failures and made the except/warning branch unreachable; errors
    # are now actually caught and logged, while still not aborting the delete.
    storage_path = get_experiment_storage_path(project_name_en, experiment_id)
    if storage_path.exists():
        try:
            shutil.rmtree(storage_path)
            logger.info(f"已删除实验存储目录: {storage_path}")
        except OSError as e:
            logger.warning(f"删除实验存储目录失败: {str(e)}")

    # Delete the DB record; cascade handles the related rows.
    success = experiment_crud.delete_experiment_run(db, experiment_id)
    if not success:
        raise HTTPException(
            status_code=500,
            detail="删除实验失败"
        )

    return MessageResponse(message=f"实验 ID {experiment_id} 已成功删除")


@router.get("/{project_name_en}/experiments/{experiment_id}")
async def get_experiment_detail(
    project_name_en: str,
    experiment_id: int,
    db: Session = Depends(get_db)
):
    """Return an experiment's full payload (categories, series, scalars, files).

    Args:
        project_name_en: Project English name.
        experiment_id: Experiment run ID.
        db: Database session.

    Returns:
        The full experiment data dict with all timestamps serialized.

    Raises:
        HTTPException: 404 if the project or experiment is missing, 400 when
            the experiment belongs to a different project.
    """
    # Validate the project.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    # Fetch the full experiment payload.
    experiment_data = experiment_crud.get_experiment_full_data(db, experiment_id)
    if not experiment_data:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    # The experiment must belong to the project named in the URL.
    if experiment_data.get("project_id") != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    def _stamp(record):
        # Normalize the record's created_at field to an ISO string, in place.
        record["created_at"] = format_datetime_for_json(record.get("created_at"))

    # Serialize every timestamp in the nested structure.
    _stamp(experiment_data)
    for category in experiment_data.get("categories", []):
        _stamp(category)
        for entry in category.get("series_data", []):
            _stamp(entry)
        for entry in category.get("scalar_data", []):
            _stamp(entry)
    for result in experiment_data.get("results", []):
        _stamp(result)

    return experiment_data


@router.get("/{project_name_en}/experiments/{experiment_id}/series/{series_id}/data")
async def get_series_data(
    project_name_en: str,
    experiment_id: int,
    series_id: int,
    skip: int = Query(0, ge=0, description="跳过的行数"),
    limit: int = Query(100, ge=1, le=10000, description="返回的最大行数"),
    db: Session = Depends(get_db)
):
    """Read one series' rows from its Parquet file, with pagination.

    Args:
        project_name_en: Project English name.
        experiment_id: Experiment run ID.
        series_id: Series data ID.
        skip: Number of rows to skip (pagination).
        limit: Maximum number of rows to return (pagination).
        db: Database session.

    Returns:
        Dict with column metadata, the requested page of rows, and totals.

    Raises:
        HTTPException: 500 when pandas is missing or the file cannot be read,
            404 when the project, series, or Parquet file is missing.
    """
    # Reading Parquet requires pandas.
    if not HAS_PANDAS:
        raise HTTPException(
            status_code=500,
            detail="服务器未安装pandas，无法读取Parquet文件"
        )

    # Validate the project.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    # Look up the series row, scoped to the experiment.
    from ..db.experiments.models import ExperimentDataSeries
    series = (
        db.query(ExperimentDataSeries)
        .filter(
            ExperimentDataSeries.id == series_id,
            ExperimentDataSeries.experiment_run_id == experiment_id,
        )
        .first()
    )
    if not series:
        raise HTTPException(
            status_code=404,
            detail=f"序列数据 ID {series_id} 不存在或不属于实验 {experiment_id}"
        )

    # Resolve the Parquet file location on disk.
    parquet_file_path = Path(EXPERIMENT_RESULTS_ROOT) / series.parquet_path
    if not parquet_file_path.exists():
        raise HTTPException(
            status_code=404,
            detail=f"Parquet文件不存在: {series.parquet_path}"
        )

    try:
        frame = pd.read_parquet(parquet_file_path)
        total_rows = len(frame)

        # Slice out the requested page.
        page_end = min(skip + limit, total_rows)
        page = frame.iloc[skip:page_end]

        # Describe each column by name and pandas dtype.
        columns_info = [
            {"name": col, "type": str(frame[col].dtype)}
            for col in frame.columns
        ]

        return {
            "series_id": series_id,
            "series_name": series.name,
            "columns": columns_info,
            "total_rows": total_rows,
            "skip": skip,
            "limit": limit,
            "data": page.to_dict(orient='records'),
            "has_more": page_end < total_rows,
        }

    except Exception as e:
        logger.exception(f"读取Parquet文件失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"读取Parquet文件失败: {str(e)}"
        )


def _auto_fit_columns(ws):
    """Resize every column of *ws* to fit its longest cell value.

    Widths are clamped to [12, 80]; merged cells are skipped both when
    measuring content and when resolving the column letter.
    """
    from openpyxl.utils import get_column_letter
    from openpyxl.cell.cell import MergedCell

    for col_idx, cells in enumerate(ws.columns, start=1):
        # Take the column letter from the first real (non-merged) cell,
        # falling back to computing it from the positional index when the
        # whole column consists of merged cells.
        letter = next(
            (c.column_letter for c in cells if not isinstance(c, MergedCell)),
            None,
        )
        if letter is None:
            letter = get_column_letter(col_idx)

        # Longest rendered value in the column (merged/empty cells ignored).
        longest = max(
            (
                len(str(c.value))
                for c in cells
                if not isinstance(c, MergedCell) and c.value is not None
            ),
            default=0,
        )

        ws.column_dimensions[letter].width = min(max(longest + 4, 12), 80)


def _safe_sheet_name(name: str, used_names: set[str]) -> str:
    base = "".join(c for c in name if c not in r'[]:*?/\\')
    if not base:
        base = "Sheet"
    base = base[:25]
    candidate = base
    suffix = 1
    while candidate in used_names:
        suffix += 1
        candidate = f"{base[:25 - len(str(suffix))]}{suffix}"
    used_names.add(candidate)
    return candidate


def _format_scalar_value(raw_value: Optional[str]) -> str:
    if raw_value is None:
        return ""
    try:
        parsed = json.loads(raw_value)
    except (ValueError, TypeError):
        return str(raw_value)
    if isinstance(parsed, (dict, list)):
        return json.dumps(parsed, ensure_ascii=False)
    return str(parsed)


@router.get("/{project_name_en}/experiments/{experiment_id}/results/export/excel")
async def export_experiment_results_excel(
    project_name_en: str,
    experiment_id: int,
    background_tasks: BackgroundTasks = BackgroundTasks(),
    db: Session = Depends(get_db),
):
    """
    导出指定实验的结果数据为Excel文件

    - Sheet1: 实验概览
    - 后续每个Sheet: 
      - 每个分类的单值数据创建独立sheet（命名为：{分类名}_单值）
      - 每个分类的所有序列数据合并到一个sheet（命名为：{分类名}_序列）
    """
    if not HAS_OPENPYXL:
        raise HTTPException(
            status_code=500,
            detail="服务器未安装openpyxl，无法生成Excel文件",
        )

    # 验证项目是否存在
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在",
        )

    experiment_data = experiment_crud.get_experiment_full_data(db, experiment_id)
    if not experiment_data:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在",
        )

    if experiment_data.get("project_id") != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'",
        )

    categories = experiment_data.get("categories", []) or []
    need_series_data = any(
        (category.get("series_data") or [])
        for category in categories
    )
    if need_series_data and not HAS_PANDAS:
        raise HTTPException(
            status_code=500,
            detail="服务器未安装pandas，无法导出序列数据",
        )

    temp_dir = Path(tempfile.mkdtemp())
    try:
        wb = Workbook()
        ws_overview = wb.active
        ws_overview.title = "实验概览"

        header_font = Font(bold=True)
        header_fill = PatternFill("solid", fgColor="E6F4FF")
        thin_border = Border(
            left=Side(style="thin"),
            right=Side(style="thin"),
            top=Side(style="thin"),
            bottom=Side(style="thin"),
        )
        center_alignment = Alignment(vertical="center")

        overview_rows = [
            ("实验名称", experiment_data.get("name") or f"实验 #{experiment_id}"),
            ("实验ID", experiment_id),
            ("所属项目", f"{project.name} ({project.name_en})"),
            ("创建时间", format_datetime_for_json(experiment_data.get("created_at"))),
            ("实验描述", experiment_data.get("description") or ""),
            ("存在快照", "是" if experiment_data.get("snapshot_file_path") else "否"),
            ("分类数量", len(categories)),
            ("序列数量", sum(len(cat.get("series_data") or []) for cat in categories)),
            ("单值指标数量", sum(len(cat.get("scalar_data") or []) for cat in categories)),
        ]
        ws_overview.append(["字段", "内容"])
        for cell in ws_overview[1]:
            cell.font = header_font
            cell.fill = header_fill
            cell.border = thin_border
            cell.alignment = center_alignment

        for field, value in overview_rows:
            ws_overview.append([field, value])
        for row in ws_overview.iter_rows(min_row=2, max_row=ws_overview.max_row, min_col=1, max_col=2):
            for cell in row:
                cell.border = thin_border
                cell.alignment = center_alignment if cell.column == 1 else Alignment(vertical="top", wrap_text=True)
        _auto_fit_columns(ws_overview)

        used_sheet_names: set[str] = {"实验概览"}

        for category in categories:
            category_name = category.get("name") or f"分类{category.get('id')}"
            category_description = category.get("description") or ""
            
            # 单值指标 - 每个分类的单值数据创建独立sheet
            scalar_data = category.get("scalar_data") or []
            if scalar_data:
                scalar_sheet_name = _safe_sheet_name(f"{category_name}_单值", used_sheet_names)
                ws_scalar = wb.create_sheet(scalar_sheet_name)
                
                # 标题
                ws_scalar.append([f"{category_name} - 单值指标"])
                ws_scalar.merge_cells(start_row=1, start_column=1, end_row=1, end_column=3)
                title_cell = ws_scalar.cell(row=1, column=1)
                title_cell.font = Font(bold=True, size=14)
                title_cell.alignment = Alignment(horizontal="left", vertical="center")
                
                # 分类描述
                if category_description:
                    ws_scalar.append(["分类描述", category_description])
                    ws_scalar.merge_cells(start_row=2, start_column=2, end_row=2, end_column=3)
                    desc_cell = ws_scalar.cell(row=2, column=1)
                    desc_cell.font = Font(bold=True)
                    for col in range(1, 4):
                        cell = ws_scalar.cell(row=2, column=col)
                        cell.border = thin_border
                        if col == 1:
                            cell.alignment = center_alignment
                        else:
                            cell.alignment = Alignment(vertical="top", wrap_text=True)
                    start_data_row = 4
                else:
                    start_data_row = 3
                
                # 表头
                ws_scalar.append([])
                header_row = ws_scalar.max_row + 1
                ws_scalar.append(["指标名称", "数值", "单位"])
                for cell in ws_scalar[header_row]:
                    cell.font = header_font
                    cell.fill = header_fill
                    cell.border = thin_border
                    cell.alignment = center_alignment
                
                # 数据行
                for scalar in scalar_data:
                    ws_scalar.append([
                        scalar.get("name"),
                        _format_scalar_value(scalar.get("value")),
                        scalar.get("unit") or "",
                    ])
                
                # 格式化数据行
                for row in ws_scalar.iter_rows(min_row=header_row + 1, max_row=ws_scalar.max_row, min_col=1, max_col=3):
                    for cell in row:
                        cell.border = thin_border
                        cell.alignment = Alignment(vertical="top", wrap_text=True)
                
                _auto_fit_columns(ws_scalar)
            
            # 序列数据 - 每个分类的所有序列数据合并到一个表格，共用索引
            series_data = category.get("series_data") or []
            if series_data:
                series_sheet_name = _safe_sheet_name(f"{category_name}_序列", used_sheet_names)
                ws_series = wb.create_sheet(series_sheet_name)
                
                # 标题
                ws_series.append([f"{category_name} - 序列数据"])
                ws_series.merge_cells(start_row=1, start_column=1, end_row=1, end_column=100)
                title_cell = ws_series.cell(row=1, column=1)
                title_cell.font = Font(bold=True, size=14)
                title_cell.alignment = Alignment(horizontal="left", vertical="center")
                
                # 分类描述
                if category_description:
                    ws_series.append(["分类描述", category_description])
                    ws_series.merge_cells(start_row=2, start_column=2, end_row=2, end_column=100)
                    desc_cell = ws_series.cell(row=2, column=1)
                    desc_cell.font = Font(bold=True)
                    for col in range(1, 101):
                        cell = ws_series.cell(row=2, column=col)
                        cell.border = thin_border
                        if col == 1:
                            cell.alignment = center_alignment
                        else:
                            cell.alignment = Alignment(vertical="top", wrap_text=True)
                
                # 合并所有序列数据到一个表格
                if HAS_PANDAS:
                    try:
                        # 读取所有序列数据
                        dfs_to_merge = []
                        series_info = []  # 存储序列名称和单位信息
                        common_index_name = None
                        ordered_index_values = []  # 保持原始顺序的索引值列表
                        all_index_values_set = set()  # 用于快速查找
                        
                        for series in series_data:
                            parquet_path = series.get("parquet_path")
                            if not parquet_path:
                                continue
                            
                            data_file_path = Path(EXPERIMENT_RESULTS_ROOT) / parquet_path
                            if not data_file_path.exists():
                                continue
                            
                            try:
                                df = pd.read_parquet(data_file_path)
                                series_name = series.get("name") or f"序列{series.get('id')}"
                                series_unit = series.get("unit") or ""
                                index_col_name = series.get("index_column_name") or "index"
                                
                                # 确定共同的索引列名（使用第一个序列的索引列名，或默认"index"）
                                if common_index_name is None:
                                    common_index_name = index_col_name or "index"
                                
                                # 处理索引
                                df_reset = df.reset_index(drop=False)
                                
                                # 去掉level_0等列
                                columns_to_drop = [col for col in df_reset.columns if col.startswith("level_") or col == "level0" or col == "level1"]
                                for col in columns_to_drop:
                                    if col in df_reset.columns:
                                        df_reset.drop(columns=[col], inplace=True)
                                
                                # 处理索引列
                                if "index" in df_reset.columns and index_col_name != "index":
                                    df_reset.rename(columns={"index": index_col_name}, inplace=True)
                                elif df.index.name and df.index.name in df_reset.columns:
                                    df_reset.rename(columns={df.index.name: index_col_name}, inplace=True)
                                
                                # 确保索引列存在
                                if common_index_name not in df_reset.columns:
                                    df_reset[common_index_name] = range(len(df_reset))
                                
                                # 获取数据列（排除索引列）
                                data_columns = [col for col in df_reset.columns if col != common_index_name]
                                
                                # 如果只有一个数据列，直接使用；如果有多个，取第一个
                                if len(data_columns) == 1:
                                    data_col = data_columns[0]
                                elif len(data_columns) > 1:
                                    data_col = data_columns[0]  # 取第一个数据列
                                else:
                                    continue  # 没有数据列，跳过
                                
                                # 构建序列数据字典：{index_value: data_value}
                                # 同时保持索引值的原始顺序（使用第一个序列的顺序作为基准）
                                series_data_dict = {}
                                current_index_order = []
                                for _, row in df_reset.iterrows():
                                    idx_val = row[common_index_name]
                                    series_data_dict[idx_val] = row[data_col]
                                    current_index_order.append(idx_val)
                                
                                # 如果是第一个序列，使用它的索引顺序作为基准
                                if not ordered_index_values:
                                    ordered_index_values = current_index_order
                                    all_index_values_set = set(ordered_index_values)
                                else:
                                    # 对于后续序列，按照基准顺序排列，然后追加新的索引值
                                    for idx_val in current_index_order:
                                        if idx_val not in all_index_values_set:
                                            ordered_index_values.append(idx_val)
                                            all_index_values_set.add(idx_val)
                                
                                dfs_to_merge.append({
                                    "name": series_name,
                                    "unit": series_unit,
                                    "data": series_data_dict
                                })
                                series_info.append({"name": series_name, "unit": series_unit})
                            except Exception as exc:  # noqa: BLE001
                                logger.warning(f"读取序列数据失败: {series_name}, 错误: {exc}")
                                continue
                        
                        # 构建表格：每行一个序列，列是索引值
                        if dfs_to_merge and ordered_index_values:
                            # 使用保持原始顺序的索引值（不排序）
                            
                            # 构建表头：序列名称 | index1 | index2 | ... | 单位
                            headers = ["序列名称"] + [str(idx) for idx in ordered_index_values] + ["单位"]
                            header_row_index = ws_series.max_row + 1
                            ws_series.append(headers)
                            for cell in ws_series[header_row_index]:
                                cell.font = header_font
                                cell.fill = header_fill
                                cell.border = thin_border
                                cell.alignment = center_alignment
                            
                            # 写入数据行：每个序列一行
                            for series_item in dfs_to_merge:
                                series_name = series_item["name"]
                                series_unit = series_item["unit"]
                                series_data_dict = series_item["data"]
                                
                                row_data = [series_name]
                                
                                # 按原始索引值顺序添加数据（不排序）
                                for idx in ordered_index_values:
                                    data_value = series_data_dict.get(idx, "")
                                    row_data.append(data_value)
                                
                                # 单位放在最后
                                row_data.append(series_unit)
                                
                                ws_series.append(row_data)
                            
                            # 格式化数据行
                            for row in ws_series.iter_rows(min_row=header_row_index + 1, max_row=ws_series.max_row, min_col=1, max_col=len(headers)):
                                for cell in row:
                                    cell.border = thin_border
                                    cell.alignment = Alignment(vertical="top")
                        else:
                            ws_series.append(["无可用序列数据"])
                    except Exception as exc:  # noqa: BLE001
                        logger.exception(f"合并序列数据失败: {exc}")
                        ws_series.append(["数据合并失败", str(exc)])
                else:
                    ws_series.append(["服务器未安装pandas，无法读取序列数据"])
                
                _auto_fit_columns(ws_series)

        experiment_name = experiment_data.get("name") or f"experiment_{experiment_id}"
        safe_name = "".join(c for c in experiment_name if c.isalnum() or c in (" ", "-", "_")).strip()
        safe_name = safe_name.replace(" ", "_") or f"experiment_{experiment_id}"
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{safe_name}_results_{timestamp}.xlsx"
        file_path = temp_dir / filename
        wb.save(file_path)

        background_tasks.add_task(cleanup_temp_dir, temp_dir)

        return FileResponse(
            path=str(file_path),
            filename=filename,
            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        )
    except Exception:
        cleanup_temp_dir(temp_dir)
        raise
@router.get("/{project_name_en}/experiments/{experiment_id}/snapshot")
async def get_experiment_snapshot(
    project_name_en: str,
    experiment_id: int,
    db: Session = Depends(get_db)
):
    """
    Return the experiment's input-parameter snapshot as JSON.

    Args:
        project_name_en: English name of the project.
        experiment_id: Experiment run ID.
        db: Database session.

    Returns:
        Parsed snapshot content. Snapshots stored in the legacy
        ``{"project": {...}, "categories": [...]}`` layout are normalized
        to ``{"metadata": {...}, "categories": [...]}`` with parameter
        values flattened to plain value lists.
    """
    # Resolve and validate project / experiment ownership.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    experiment = experiment_crud.get_experiment_run(db, experiment_id)
    if not experiment:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    if experiment.project_id != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    if not experiment.snapshot_file_path:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 没有快照文件"
        )

    snapshot_path = Path(EXPERIMENT_RESULTS_ROOT) / experiment.snapshot_file_path
    if not snapshot_path.exists():
        raise HTTPException(
            status_code=404,
            detail=f"快照文件不存在: {experiment.snapshot_file_path}"
        )

    try:
        with open(snapshot_path, 'r', encoding='utf-8') as fh:
            snapshot = json.load(fh)

        # Modern layout: return as-is.
        if "project" not in snapshot:
            return snapshot

        # Legacy layout: rename "project" -> "metadata" and collapse
        # parameter values from [{"index": i, "value": v}, ...] to [v, ...].
        categories = snapshot.get("categories", [])
        for category in categories:
            for param in category.get("parameters", []):
                values = param.get("values")
                if (
                    isinstance(values, list)
                    and len(values) > 0
                    and isinstance(values[0], dict)
                    and "value" in values[0]
                ):
                    param["values"] = [item.get("value") for item in values]

        return {
            "metadata": snapshot["project"],
            "categories": categories
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"读取快照文件失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"读取快照文件失败: {str(e)}"
        )


def cleanup_temp_dir(temp_dir: Path):
    """Remove a temporary directory tree (intended as a background task).

    Any failure is logged at warning level and swallowed, because this
    typically runs after the HTTP response has already been sent.
    """
    try:
        if not temp_dir.exists():
            return
        shutil.rmtree(temp_dir)
        logger.debug(f"已清理临时目录: {temp_dir}")
    except Exception as e:
        logger.warning(f"清理临时目录失败: {temp_dir}, 错误: {e}")


@router.get("/{project_name_en}/experiments/{experiment_id}/snapshot/download")
async def download_experiment_snapshot(
    project_name_en: str,
    experiment_id: int,
    format: str = Query("json", description="导出格式：json, excel, word, markdown"),
    background_tasks: BackgroundTasks = BackgroundTasks(),
    db: Session = Depends(get_db)
):
    """
    Download the experiment snapshot file, optionally converted to
    another format.

    Args:
        project_name_en: English name of the project.
        experiment_id: Experiment run ID.
        format: Export format (json, excel, word, markdown).
        background_tasks: Used to delete the temporary conversion
            directory after the response has been sent.
        db: Database session.

    Returns:
        FileResponse streaming the (converted) snapshot file.

    Raises:
        HTTPException: 404 when the project/experiment/snapshot is
            missing, 400 for a foreign experiment or unsupported format,
            500 on conversion or I/O failure.
    """
    # Validate that the project exists
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    # Fetch the experiment record
    experiment = experiment_crud.get_experiment_run(db, experiment_id)
    if not experiment:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    # The experiment must belong to this project
    if experiment.project_id != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    # The experiment must reference a snapshot file
    if not experiment.snapshot_file_path:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 没有快照文件"
        )

    snapshot_file_path = Path(EXPERIMENT_RESULTS_ROOT) / experiment.snapshot_file_path
    if not snapshot_file_path.exists():
        raise HTTPException(
            status_code=404,
            detail=f"快照文件不存在: {experiment.snapshot_file_path}"
        )

    try:
        # Parse the requested export format
        try:
            output_format = IOFormat(format.lower())
        except ValueError:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的导出格式: {format}。支持的格式: {', '.join([f.value for f in FormatConverter.get_supported_export_formats()])}"
            )

        # Snapshots are always stored as JSON
        input_format = IOFormat.JSON

        temp_dir = Path(tempfile.mkdtemp())
        try:
            result = FormatConverter.convert(
                input_path=snapshot_file_path,
                input_format=input_format,
                output_format=output_format,
                output_dir=temp_dir
            )

            # Determine the file extension; tolerate both "extensions"
            # (list) and the legacy "extension" (scalar) keys
            format_info = FormatConverter.get_format_info(output_format, is_export=True)
            extensions = format_info.get("extensions", [])
            if not extensions and "extension" in format_info:
                extensions = [format_info["extension"]]
            file_ext = extensions[0] if extensions else f".{output_format.value}"

            # Build a filesystem-safe download name. Fall back to a generic
            # name when sanitization strips every character (consistent with
            # the Excel results export endpoint).
            experiment_name = experiment.name or f"experiment_{experiment_id}"
            safe_name = "".join(c for c in experiment_name if c.isalnum() or c in (' ', '-', '_')).strip()
            safe_name = safe_name.replace(' ', '_') or f"experiment_{experiment_id}"
            download_filename = f"{safe_name}_snapshot{file_ext}"

            # Clean up the temp directory only after the file has been sent
            background_tasks.add_task(cleanup_temp_dir, temp_dir)

            return FileResponse(
                path=str(result.file_path),
                filename=download_filename,
                media_type=result.mime_type or "application/octet-stream"
            )
        except Exception:
            # Conversion failed before the response was scheduled: remove
            # the temp directory now so it does not leak on disk
            cleanup_temp_dir(temp_dir)
            raise

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"下载快照文件失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"下载快照文件失败: {str(e)}"
        )


@router.get("/{project_name_en}/experiments/{experiment_id}/snapshot/formats")
async def get_snapshot_export_formats(
    project_name_en: str,
    experiment_id: int,
    db: Session = Depends(get_db)
):
    """
    List the export formats supported for an experiment snapshot.

    Args:
        project_name_en: English name of the project.
        experiment_id: Experiment run ID.
        db: Database session.

    Returns:
        Dict with a "formats" list; each entry carries the format id,
        display name, file extensions and a description.
    """
    # Same ownership validation as the other snapshot endpoints.
    project = get_project_by_name_en(db, project_name_en)
    if not project:
        raise HTTPException(
            status_code=404,
            detail=f"项目 '{project_name_en}' 不存在"
        )

    experiment = experiment_crud.get_experiment_run(db, experiment_id)
    if not experiment:
        raise HTTPException(
            status_code=404,
            detail=f"实验 ID {experiment_id} 不存在"
        )

    if experiment.project_id != project.id:
        raise HTTPException(
            status_code=400,
            detail=f"实验 ID {experiment_id} 不属于项目 '{project_name_en}'"
        )

    def _describe(fmt):
        # One summary entry per exportable format.
        info = FormatConverter.get_format_info(fmt, is_export=True)
        return {
            "format": fmt.value,
            "name": info.get("name", fmt.value),
            "extensions": info.get("extensions", []),
            "description": info.get("description", "")
        }

    return {
        "formats": [_describe(fmt) for fmt in FormatConverter.get_supported_export_formats()]
    }

