import io
import json
import os
import random
import secrets
import string
import tempfile
from typing import List, Optional, Dict, Any, Union

import numpy as np
import pandas as pd
from fastapi import APIRouter, HTTPException, UploadFile, File, Form, Request
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel

import show_plt_data_frame as spdf

router_module_real = APIRouter()
# Jinja2 template directory; templates are served out of the "static" folder
templates = Jinja2Templates(directory="static")

class DataFrameInfo(BaseModel):
    """Basic metadata describing an uploaded tabular file."""
    file_path: str  # server-side path of the uploaded file
    columns: List[str]  # column names of the loaded DataFrame
    shape: List[int]  # [row_count, column_count]

class VisualizationRequest(BaseModel):
    """Parameters for a single chart request (see /visualize)."""
    file_path: str  # path of the uploaded data file
    plot_type: str  # one of: bar, line, pie, scatter, heatmap
    x_column: str  # column used for the X axis / labels
    y_column: Optional[str] = None  # single Y column (bar/pie/scatter)
    y_columns: Optional[List[str]] = None  # multiple Y columns (line charts)
    title: Optional[str] = None  # chart title; empty string used when omitted
    y_label: Optional[str] = None  # Y-axis label; bar charts fall back to "数量"

class FilterCondition(BaseModel):
    """One filter predicate applied to a single column."""
    column: str  # column the condition applies to
    operator: str  # eq, gt, lt, ge, le, contains, between
    value: Union[str, int, float]  # comparison value (coerced to float for numeric columns)
    value2: Optional[Union[str, int, float]] = None  # upper bound, only for the "between" operator

class FilterRequest(BaseModel):
    """A file plus an ordered list of filter conditions (ANDed together)."""
    file_path: str  # path of the uploaded data file
    conditions: List[FilterCondition]  # applied sequentially; each narrows the result

# Module landing endpoint: name and version only.
@router_module_real.get("/")
def index():
    """Return this module's display name and version."""
    meta = {"message": "pandas数据分析与可视化模块", "version": "1.0.0"}
    return meta

# File upload endpoint
@router_module_real.post("/upload-file")
async def upload_file(file: UploadFile = File(...)):
    """
    Upload a CSV or Excel file.

    The file is saved under static/uploads with an unpredictable random name
    (avoids collisions and filename guessing), then loaded once to verify it
    really is a parseable CSV/Excel file.

    Returns the saved location plus the DataFrame's columns and shape.
    Raises HTTP 400 (after removing the saved file) if the file is invalid.
    """
    # Make sure the upload directory exists.
    upload_dir = os.path.join("static", "uploads")
    os.makedirs(upload_dir, exist_ok=True)

    # file.filename may be None depending on the client; fall back to "".
    file_extension = os.path.splitext(file.filename or "")[1]
    # secrets gives a cryptographically random name; random.choices is
    # predictable and unsuitable for names exposed under /static.
    random_filename = secrets.token_hex(5)  # 10 hex characters
    file_location = os.path.join(upload_dir, f"{random_filename}{file_extension}")

    try:
        with open(file_location, "wb") as buffer:
            buffer.write(await file.read())

        # Load once to confirm the content is a valid CSV/Excel file.
        handler = spdf.DataFrameHandler()
        df = handler.load_file(file_location)

        return {
            "success": True,
            "filename": file.filename,
            "saved_location": file_location,
            "columns": df.columns.tolist(),
            "shape": list(df.shape)
        }
    except Exception as e:
        # Remove the partially-saved / invalid file before reporting.
        if os.path.exists(file_location):
            os.remove(file_location)
        raise HTTPException(status_code=400, detail=f"无法处理文件: {str(e)}")

# Basic info about an already-uploaded file
@router_module_real.get("/file-info/{file_path:path}")
async def get_file_info(file_path: str):
    """
    Return basic information about a previously uploaded file.
    """
    try:
        handler = spdf.DataFrameHandler()
        frame = handler.load_file(file_path)
        return handler.get_df_info(frame)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"无法读取文件: {str(e)}")

# Data preview API
@router_module_real.get("/data-preview/{file_path:path}")
async def get_data_preview(file_path: str):
    """
    Return a data preview: df.info() text, the first 5 rows, per-column
    dtypes, unique-value counts, null counts and describe() statistics.

    Raises HTTP 400 if the file cannot be read or processed.
    """
    try:
        handler = spdf.DataFrameHandler()
        df = handler.load_file(file_path)

        # df.info() writes to a stream instead of returning a string.
        buffer = io.StringIO()
        df.info(buf=buffer)
        info_str = buffer.getvalue()

        # First 5 rows.  numpy scalars and NaN/inf are converted so the JSON
        # response stays valid (NaN/Infinity are not legal JSON); previously
        # raw records were returned and could break serialization.
        head_data = []
        for row in df.head(5).to_dict(orient="records"):
            clean = {}
            for key, val in row.items():
                if isinstance(val, np.integer):
                    clean[key] = int(val)
                elif isinstance(val, (float, np.floating)):
                    clean[key] = float(val) if np.isfinite(val) else None
                else:
                    clean[key] = val
            head_data.append(clean)

        # Per-column dtype names.
        dtypes = {col: str(dtype) for col, dtype in df.dtypes.items()}

        # Unique-value and missing-value counts per column.
        unique_counts = {col: int(df[col].nunique()) for col in df.columns}
        null_counts = {col: int(df[col].isna().sum()) for col in df.columns}

        # describe() for numeric columns only.  BUGFIX: numeric_df was
        # computed but describe() ran on the full frame; use numeric_df so
        # non-numeric columns can never leak into the statistics.
        describe_data = None
        try:
            numeric_df = df.select_dtypes(include=['number'])
            if not numeric_df.empty:
                describe_data = {}
                for col, stats in numeric_df.describe().to_dict().items():
                    describe_data[col] = {}
                    for stat, val in stats.items():
                        if isinstance(val, (np.integer, np.floating)):
                            describe_data[col][stat] = float(val) if np.isfinite(val) else None
                        else:
                            describe_data[col][stat] = val
        except Exception as e:
            print(f"处理描述统计时出错: {str(e)}")

        # Human-readable meaning of each describe() statistic (for the UI).
        describe_functions = {
            "count": "非NA值的数量",
            "mean": "平均值 - 所有值的算术平均数",
            "std": "标准差 - 衡量数据分布的离散程度",
            "min": "最小值 - 数据中的最小值",
            "25%": "第一四分位数 - 25%的数据小于此值",
            "50%": "中位数 - 数据的中间值，50%的数据小于此值",
            "75%": "第三四分位数 - 75%的数据小于此值",
            "max": "最大值 - 数据中的最大值"
        }

        return {
            "info": info_str,
            "head": head_data,
            "dtypes": dtypes,
            "columns": df.columns.tolist(),
            "rows": len(df),
            "unique_counts": unique_counts,
            "null_counts": null_counts,
            "describe": describe_data,
            "describe_functions": describe_functions
        }
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"无法获取数据预览: {str(e)}")

def _json_safe_number(val):
    """Convert a numpy scalar to a plain JSON-safe value (NaN/inf -> None)."""
    if isinstance(val, np.integer):
        return int(val)
    if isinstance(val, (float, np.floating)):
        return float(val) if np.isfinite(val) else None
    return val


def _sanitize_records(frame):
    """Rows of `frame` as JSON-safe dicts.

    numpy scalars become int/float (NaN/inf -> None); every other value is
    str()'d (None stays None) so the payload always serializes.
    """
    records = []
    for row in frame.to_dict(orient="records"):
        clean = {}
        for key, val in row.items():
            if isinstance(val, (np.integer, np.floating)):
                if np.isfinite(val):
                    clean[key] = float(val) if isinstance(val, np.floating) else int(val)
                else:
                    clean[key] = None
            else:
                clean[key] = str(val) if val is not None else None
        records.append(clean)
    return records


def _describe_as_json(frame):
    """frame.describe() as nested plain dicts (NaN/inf -> None); None on failure."""
    try:
        return {
            col: {stat: _json_safe_number(val) for stat, val in stats.items()}
            for col, stats in frame.describe().to_dict().items()
        }
    except Exception as e:
        print(f"处理描述统计时出错: {str(e)}")
        return None


# DataFrame attributes & methods API
@router_module_real.get("/dataframe-info/{file_path:path}")
async def get_dataframe_info(file_path: str):
    """
    Return rich information about a DataFrame: basic shape/memory facts,
    per-column statistics, a categorized list of DataFrame methods with
    short descriptions, describe() output, a correlation matrix for numeric
    columns, and sanitized head/tail samples.

    Raises HTTP 400 if the file cannot be read or processed.
    """
    try:
        handler = spdf.DataFrameHandler()
        df = handler.load_file(file_path)

        # Basic facts.  Memory usage is computed once (it walks the frame).
        memory_bytes = int(df.memory_usage(deep=True).sum())
        basic_info = {
            "shape": [int(df.shape[0]), int(df.shape[1])],
            "size": int(df.size),
            "ndim": int(df.ndim),
            "memory_usage": memory_bytes,
            "memory_usage_human": f"{memory_bytes / (1024*1024):.2f} MB"
        }

        # Per-column statistics.
        col_stats = {}
        for col in df.columns:
            # Create the entry first so the except-branch below can always
            # attach an "error" key (previously a KeyError was possible).
            col_stats[col] = {}
            try:
                series = df[col]
                col_stats[col].update({
                    "type": str(series.dtype),
                    "null_count": int(series.isna().sum()),
                    "unique_count": int(series.nunique()),
                })

                if pd.api.types.is_numeric_dtype(series):
                    # Compute each aggregate once (they were recomputed
                    # two or three times per statistic before).
                    for name, agg in (("min", series.min()), ("max", series.max()),
                                      ("mean", series.mean()), ("median", series.median()),
                                      ("std", series.std())):
                        col_stats[col][name] = (
                            float(agg) if not pd.isna(agg) and np.isfinite(agg) else None
                        )
                elif pd.api.types.is_string_dtype(series) or pd.api.types.is_object_dtype(series):
                    # Top-5 most frequent values.
                    most_common = series.value_counts().head(5).to_dict()
                    col_stats[col]["most_common"] = {str(k): int(v) for k, v in most_common.items()}
                    # Mean character length (best effort).
                    try:
                        mean_len = float(series.astype(str).str.len().mean())
                        col_stats[col]["mean_length"] = mean_len if np.isfinite(mean_len) else None
                    except Exception:
                        pass
                elif pd.api.types.is_datetime64_dtype(series):
                    dt_min, dt_max = series.min(), series.max()
                    col_stats[col].update({
                        "min_date": str(dt_min),
                        "max_date": str(dt_max),
                        "range_days": int((dt_max - dt_min).days)
                        if not pd.isna(dt_min) and not pd.isna(dt_max) else None
                    })
            except Exception as e:
                col_stats[col]["error"] = str(e)

        # Public (non-underscore) attributes of the DataFrame object.
        df_attrs = [attr for attr in dir(df) if not attr.startswith('_')]

        # Curated method lists shown to the user, grouped by category.
        methods_by_category = {
            "统计函数": ["describe", "count", "mean", "median", "min", "max", "std", "var", "sum", "cumsum", "quantile", "corr", "cov"],
            "索引和选择": ["loc", "iloc", "at", "iat", "filter", "head", "tail", "sample", "nlargest", "nsmallest"],
            "缺失值处理": ["isna", "notna", "dropna", "fillna", "interpolate", "replace"],
            "分组和聚合": ["groupby", "pivot", "pivot_table", "crosstab", "melt", "stack", "unstack"],
            "合并和连接": ["merge", "join", "concat", "append"],
            "排序和排名": ["sort_values", "sort_index", "rank"],
            "形状变换": ["transpose", "T", "shift", "diff", "pct_change", "rolling", "expanding"],
            "数据类型转换": ["astype", "infer_objects", "convert_dtypes"],
            "索引操作": ["set_index", "reset_index", "reindex"],
            "输出和格式化": ["to_csv", "to_excel", "to_json", "to_dict", "to_string"],
            "其他属性": [attr for attr in df_attrs if not callable(getattr(df, attr, None))]
        }

        # Attach a short description to every method that actually exists.
        all_methods = {}
        for category, methods in methods_by_category.items():
            all_methods[category] = []
            for method in methods:
                if method not in df_attrs:
                    continue
                try:
                    doc = getattr(df, method).__doc__
                    # BUGFIX: pandas docstrings start with a newline, so the
                    # literal first line was usually empty; take the first
                    # NON-empty line instead.
                    short_doc = "无文档描述"
                    if doc:
                        short_doc = next(
                            (line.strip() for line in doc.splitlines() if line.strip()),
                            "无文档描述")
                    all_methods[category].append({
                        "name": method,
                        "description": short_doc,
                        "is_callable": callable(getattr(df, method))
                    })
                except Exception:
                    all_methods[category].append({
                        "name": method,
                        "description": "无法获取描述",
                        "is_callable": callable(getattr(df, method, None))
                    })

        # describe() statistics, converted to JSON-safe plain types.
        describe_data = _describe_as_json(df)

        # Correlation matrix for numeric columns (needs at least 2 of them).
        correlation = None
        try:
            numeric_df = df.select_dtypes(include=['number'])
            if not numeric_df.empty and numeric_df.shape[1] > 1:
                corr_matrix = numeric_df.corr()
                correlation = {
                    col: {
                        idx: (float(val) if np.isfinite(val) else None)
                        for idx, val in corr_matrix[col].items()
                    }
                    for col in corr_matrix.columns
                }
        except Exception as e:
            print(f"处理相关性矩阵时出错: {str(e)}")

        # Sanitized sample rows (shared helper replaces two duplicated loops).
        head_data = _sanitize_records(df.head(5))
        tail_data = _sanitize_records(df.tail(5))

        return {
            "basic_info": basic_info,
            "column_stats": col_stats,
            "methods_by_category": all_methods,
            "describe": describe_data,
            "correlation": correlation,
            "head_data": head_data,
            "tail_data": tail_data
        }
    except Exception as e:
        import traceback
        error_detail = traceback.format_exc()
        print(f"获取DataFrame信息错误: {error_detail}")
        raise HTTPException(status_code=400, detail=f"无法获取DataFrame信息: {str(e)}")

# Group & aggregate endpoint
@router_module_real.post("/group-data")
async def group_data(file_path: str = Form(...), group_by: str = Form(...), 
                  agg_column: str = Form(...), agg_method: str = Form("mean")):
    """
    Group the data by one column and aggregate another, returning the
    grouped rows as a list of records.
    """
    try:
        handler = spdf.DataFrameHandler()
        frame = handler.load_file(file_path)
        grouped = handler.group_data(frame, [group_by], {agg_column: agg_method})
        return grouped.to_dict(orient="records")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"分组聚合失败: {str(e)}")

# Visualization endpoint
@router_module_real.post("/visualize", response_class=HTMLResponse)
async def visualize_data(request: Request, visualization: VisualizationRequest):
    """
    Create a chart for the requested file/columns and return it as an HTML
    fragment.  Supported plot_type values: bar, line, pie, scatter, heatmap.

    Raises HTTP 400 for invalid input and HTTP 500 for unexpected failures.
    """
    try:
        handler = spdf.DataFrameHandler()
        try:
            df = handler.load_file(visualization.file_path)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"文件加载失败: {str(e)}")

        # Validate that all requested columns exist.
        if visualization.x_column not in df.columns:
            raise HTTPException(status_code=400, detail=f"X轴列 '{visualization.x_column}' 在数据中不存在")
        if visualization.y_column and visualization.y_column not in df.columns:
            raise HTTPException(status_code=400, detail=f"Y轴列 '{visualization.y_column}' 在数据中不存在")
        if visualization.y_columns:
            missing_cols = [col for col in visualization.y_columns if col not in df.columns]
            if missing_cols:
                raise HTTPException(status_code=400, detail=f"以下Y轴列在数据中不存在: {', '.join(missing_cols)}")

        # Nothing to draw for an empty frame.
        if df.empty:
            return HTMLResponse(content="<div class='alert alert-warning'>数据为空，无法生成图表</div>")

        # Reject columns that contain no values at all.
        if df[visualization.x_column].isna().all():
            raise HTTPException(status_code=400, detail=f"X轴列 '{visualization.x_column}' 全为空值")
        if visualization.y_column and df[visualization.y_column].isna().all():
            raise HTTPException(status_code=400, detail=f"Y轴列 '{visualization.y_column}' 全为空值")

        # Dispatch on the chart type.
        try:
            if visualization.plot_type == "bar":
                img_base64 = handler.plot_bar(df, visualization.x_column, visualization.y_column,
                                        visualization.title or "",
                                        visualization.y_label or "数量")
            elif visualization.plot_type == "line":
                # BUGFIX: with neither y_column nor y_columns set this used to
                # pass [None] into plot_line, failing with an obscure error.
                y_cols = visualization.y_columns or (
                    [visualization.y_column] if visualization.y_column else None)
                if not y_cols:
                    raise HTTPException(status_code=400, detail="折线图需要指定y_column或y_columns")
                img_base64 = handler.plot_line(df, visualization.x_column, y_cols,
                                        visualization.title or "")
            elif visualization.plot_type == "pie":
                img_base64 = handler.plot_pie(df, visualization.y_column, visualization.x_column,
                                        visualization.title or "")
            elif visualization.plot_type == "scatter":
                img_base64 = handler.plot_scatter(df, visualization.x_column, visualization.y_column,
                                            visualization.title or "")
            elif visualization.plot_type == "heatmap":
                img_base64 = handler.correlation_heatmap(df, visualization.title or "相关性热力图")
            else:
                raise HTTPException(status_code=400, detail=f"不支持的图表类型: {visualization.plot_type}")
        except HTTPException:
            # BUGFIX: without this, the "不支持的图表类型" 400 above was caught
            # by the generic handler below and re-wrapped.
            raise
        except Exception as e:
            import traceback
            print(f"可视化错误: {str(e)}\n{traceback.format_exc()}")
            raise HTTPException(status_code=400, detail=f"生成{visualization.plot_type}图表时出错: {str(e)}")

        # Wrap the base64 image in an HTML fragment.
        return HTMLResponse(content=handler.plot_to_html(img_base64))
    except HTTPException:
        raise
    except Exception as e:
        import traceback
        print(f"可视化意外错误: {str(e)}\n{traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"可视化生成失败: {str(e)}")

# Analysis report endpoint
@router_module_real.get("/analyze-csv/{file_path:path}", response_class=HTMLResponse)
async def analyze_csv(request: Request, file_path: str):
    """
    Analyze a CSV file and render an HTML report (basic info + charts).

    Raises HTTP 400 with the analysis error if the file cannot be processed.
    """
    try:
        # Normalize bare filenames into the uploads directory.
        if not file_path.startswith("static/uploads/"):
            file_path = os.path.join("static", "uploads", os.path.basename(file_path))

        results = spdf.analyze_csv_example(file_path)

        if "error" in results:
            raise HTTPException(status_code=400, detail=results["error"])

        # Assemble the report from parts and join once (avoids repeated +=).
        parts = [f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>数据分析报告</title>
            <meta charset="UTF-8">
            <style>
                body {{ font-family: Arial, sans-serif; margin: 20px; }}
                h1 {{ color: #2c3e50; }}
                h2 {{ color: #3498db; margin-top: 30px; }}
                .info {{ background-color: #f8f9fa; padding: 15px; border-radius: 5px; }}
                .visualization {{ margin-top: 20px; }}
            </style>
        </head>
        <body>
            <h1>数据分析报告</h1>
            <h2>数据基本信息</h2>
            <div class="info">
                <pre>{results["info"]}</pre>
            </div>
            
            <h2>数据可视化</h2>
        """]

        for name, vis_html in results["visualizations"].items():
            parts.append(f"""
            <div class="visualization">
                <h3>{name.capitalize()}</h3>
                {vis_html}
            </div>
            """)

        parts.append("""
        </body>
        </html>
        """)

        return HTMLResponse(content="".join(parts))
    except HTTPException:
        # BUGFIX: the 400 raised for results["error"] used to be swallowed by
        # the generic handler below and re-wrapped as "分析失败: ...".
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"分析失败: {str(e)}")

# Time-series analysis endpoint
@router_module_real.post("/time-series", response_class=HTMLResponse)
async def time_series_analysis(file_path: str = Form(...), date_column: str = Form(...),
                           value_column: str = Form(...), freq: str = Form("M")):
    """
    Run a time-series analysis on one value column over a date column and
    return the resulting chart as an HTML fragment.
    """
    try:
        handler = spdf.DataFrameHandler()
        frame = handler.load_file(file_path)
        chart_b64 = handler.time_series_analysis(frame, date_column, value_column, freq)
        return HTMLResponse(content=handler.plot_to_html(chart_b64))
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"时间序列分析失败: {str(e)}")

def _apply_filter_conditions(df, conditions):
    """Apply FilterCondition filters to `df` sequentially and return the result.

    String values are coerced to float for numeric columns.  Raises
    HTTPException(400) for unknown columns/operators, unconvertible values,
    or a "between" condition missing value2.
    """
    for condition in conditions:
        if condition.column not in df.columns:
            raise HTTPException(status_code=400, detail=f"列 '{condition.column}' 在数据中不存在")
        try:
            numeric = pd.api.types.is_numeric_dtype(df[condition.column].dtype)

            # Coerce the comparison value(s) for numeric columns.
            converted_value = condition.value
            if numeric:
                try:
                    converted_value = float(condition.value) if isinstance(condition.value, str) else condition.value
                except ValueError:
                    raise HTTPException(status_code=400,
                                        detail=f"无法将值 '{condition.value}' 转换为 '{condition.column}' 列的数值类型")

            converted_value2 = None
            if condition.operator == "between" and condition.value2 is not None:
                if numeric:
                    try:
                        converted_value2 = float(condition.value2) if isinstance(condition.value2, str) else condition.value2
                    except ValueError:
                        raise HTTPException(status_code=400,
                                            detail=f"无法将值 '{condition.value2}' 转换为 '{condition.column}' 列的数值类型")
                else:
                    converted_value2 = condition.value2

            series = df[condition.column]
            op = condition.operator
            if op == "eq":
                df = df[series == converted_value]
            elif op == "gt":
                df = df[series > converted_value]
            elif op == "lt":
                df = df[series < converted_value]
            elif op == "ge":
                df = df[series >= converted_value]
            elif op == "le":
                df = df[series <= converted_value]
            elif op == "contains":
                # Compare as strings; missing values never match.
                df = df[series.astype(str).str.contains(str(converted_value), na=False)]
            elif op == "between":
                if converted_value2 is None:
                    raise HTTPException(status_code=400, detail=f"between操作符需要提供value2参数")
                df = df[(series >= converted_value) & (series <= converted_value2)]
            else:
                raise HTTPException(status_code=400, detail=f"不支持的操作符: {op}")
        except HTTPException:
            raise
        except Exception as e:
            import traceback
            print(f"筛选错误详情: {traceback.format_exc()}")
            raise HTTPException(status_code=400, detail=f"筛选条件应用失败: {str(e)}")
    return df


# Data filtering endpoint
@router_module_real.post("/filter-data")
async def filter_data(filter_req: FilterRequest):
    """
    Filter the file's data by the request's conditions and return row counts,
    a per-column summary of the result, and a 5-row sample.
    """
    try:
        handler = spdf.DataFrameHandler()
        try:
            df = handler.load_file(filter_req.file_path)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"文件加载失败: {str(e)}")

        original_rows = len(df)
        df = _apply_filter_conditions(df, filter_req.conditions)
        filtered_rows = len(df)

        # Per-column summary of the filtered result.
        column_summary = {}
        for col in df.columns:
            if pd.api.types.is_numeric_dtype(df[col]):
                # Compute each aggregate once (previously computed twice).
                col_min, col_max, col_mean = df[col].min(), df[col].max(), df[col].mean()
                column_summary[col] = {
                    "min": float(col_min) if not pd.isna(col_min) else None,
                    "max": float(col_max) if not pd.isna(col_max) else None,
                    "mean": float(col_mean) if not pd.isna(col_mean) else None,
                    "unique_values": len(df[col].unique())
                }
            else:
                # BUGFIX: value_counts() yields numpy int64 counts (and
                # possibly non-string keys), which are not JSON serializable;
                # convert them explicitly like /dataframe-info does.
                value_counts = df[col].value_counts().head(5).to_dict()
                column_summary[col] = {
                    "unique_values": len(df[col].unique()),
                    "most_common": {str(k): int(v) for k, v in value_counts.items()}
                }

        return {
            "success": True,
            "original_rows": original_rows,
            "filtered_rows": filtered_rows,
            "columns": df.columns.tolist(),
            "column_summary": column_summary,
            "sample_data": df.head(5).to_dict(orient="records")
        }
    except HTTPException:
        raise
    except Exception as e:
        import traceback
        print(f"筛选错误详情: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"筛选失败: {str(e)}")

# Save-filtered-result endpoint
@router_module_real.post("/save-filtered-data")
async def save_filtered_data(filter_req: FilterRequest, export_format: str = "xlsx"):
    """
    Apply the request's filter conditions and save the result as a new file
    under static/downloads, returning its path and a download URL.

    export_format: "csv", "xls", or anything else => "xlsx".
    """
    try:
        handler = spdf.DataFrameHandler()
        try:
            df = handler.load_file(filter_req.file_path)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"文件加载失败: {str(e)}")

        # Apply filter conditions (mirrors /filter-data).
        for condition in filter_req.conditions:
            if condition.column not in df.columns:
                raise HTTPException(status_code=400, detail=f"列 '{condition.column}' 在数据中不存在")
            try:
                numeric = pd.api.types.is_numeric_dtype(df[condition.column].dtype)

                # Coerce string values for numeric columns.
                converted_value = condition.value
                if numeric:
                    try:
                        converted_value = float(condition.value) if isinstance(condition.value, str) else condition.value
                    except ValueError:
                        raise HTTPException(status_code=400, 
                                         detail=f"无法将值 '{condition.value}' 转换为 '{condition.column}' 列的数值类型")

                converted_value2 = None
                if condition.operator == "between" and condition.value2 is not None:
                    if numeric:
                        try:
                            converted_value2 = float(condition.value2) if isinstance(condition.value2, str) else condition.value2
                        except ValueError:
                            raise HTTPException(status_code=400, 
                                            detail=f"无法将值 '{condition.value2}' 转换为 '{condition.column}' 列的数值类型")
                    else:
                        converted_value2 = condition.value2

                series = df[condition.column]
                if condition.operator == "eq":
                    df = df[series == converted_value]
                elif condition.operator == "gt":
                    df = df[series > converted_value]
                elif condition.operator == "lt":
                    df = df[series < converted_value]
                elif condition.operator == "ge":
                    df = df[series >= converted_value]
                elif condition.operator == "le":
                    df = df[series <= converted_value]
                elif condition.operator == "contains":
                    df = df[series.astype(str).str.contains(str(converted_value), na=False)]
                elif condition.operator == "between":
                    if converted_value2 is None:
                        raise HTTPException(status_code=400, detail=f"between操作符需要提供value2参数")
                    df = df[(series >= converted_value) & (series <= converted_value2)]
                else:
                    raise HTTPException(status_code=400, detail=f"不支持的操作符: {condition.operator}")
            except HTTPException:
                raise
            except Exception as e:
                import traceback
                print(f"保存筛选数据 - 错误详情: {traceback.format_exc()}")
                raise HTTPException(status_code=400, detail=f"筛选条件应用失败: {str(e)}")

        # BUGFIX: the downloads directory must exist BEFORE writing the
        # export (it was previously created only after the to_csv/to_excel
        # calls, so the first export on a fresh deployment failed).
        download_dir = os.path.join("static", "downloads")
        os.makedirs(download_dir, exist_ok=True)

        # Build the output name; the random suffix avoids collisions.
        filename_without_ext, _ = os.path.splitext(os.path.basename(filter_req.file_path))
        fmt = export_format.lower()
        ext = fmt if fmt in ("csv", "xls") else "xlsx"
        new_filename = f"{filename_without_ext}_filtered_{random.randint(1000, 9999)}.{ext}"
        save_path = os.path.join(download_dir, new_filename)

        # Export (three near-identical branches consolidated into one).
        try:
            if ext == "csv":
                df.to_csv(save_path, index=False, encoding='utf-8')
            else:
                # NOTE(review): openpyxl writes xlsx content only, so the
                # "xls" branch produces an xlsx file with a .xls name —
                # confirm whether legacy .xls output is actually required.
                df.to_excel(save_path, index=False, engine='openpyxl')
        except Exception as e:
            import traceback
            print(f"{ext.upper()}导出错误: {traceback.format_exc()}")
            raise HTTPException(status_code=500, detail=f"导出{ext.upper()}失败: {str(e)}")

        return {
            "success": True,
            "original_file": filter_req.file_path,
            "filtered_file": save_path,
            "download_url": f"/static/downloads/{new_filename}",
            "rows": len(df),
            "columns": df.columns.tolist()
        }
    except HTTPException:
        raise
    except Exception as e:
        import traceback
        print(f"保存筛选数据 - 异常详情: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"保存筛选数据失败: {str(e)}")

@router_module_real.post("/visualize-filtered", response_class=HTMLResponse)
async def visualize_filtered_data(request: Request, filter_visualization: dict):
    """
    Create a chart from a filtered subset of an uploaded data file.

    The request body is a dict with two keys:
      - "filter_params": payload for FilterRequest (file_path + conditions)
      - "visualization_params": payload for VisualizationRequest
        (plot_type, x/y columns, title, y_label)

    Returns:
        HTMLResponse with the chart embedded as a base64 image, or a
        warning <div> when the filtered DataFrame is empty.

    Raises:
        HTTPException 400: missing column, bad value/operator, or plot failure.
        HTTPException 500: any other unexpected error.
    """
    try:
        handler = spdf.DataFrameHandler()

        # Unpack the combined payload into the two validated models.
        filter_req = FilterRequest(**filter_visualization["filter_params"])
        visualization = VisualizationRequest(**filter_visualization["visualization_params"])

        # Load the data file.
        try:
            df = handler.load_file(filter_req.file_path)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"文件加载失败: {str(e)}")

        # Validate that all requested plot columns exist before filtering.
        if visualization.x_column not in df.columns:
            raise HTTPException(status_code=400, detail=f"X轴列 '{visualization.x_column}' 在数据中不存在")

        if visualization.y_column and visualization.y_column not in df.columns:
            raise HTTPException(status_code=400, detail=f"Y轴列 '{visualization.y_column}' 在数据中不存在")

        if visualization.y_columns:
            missing_cols = [col for col in visualization.y_columns if col not in df.columns]
            if missing_cols:
                raise HTTPException(status_code=400, detail=f"以下Y轴列在数据中不存在: {', '.join(missing_cols)}")

        # Apply each filter condition in turn, narrowing df.
        for condition in filter_req.conditions:
            if condition.column not in df.columns:
                raise HTTPException(status_code=400, detail=f"列 '{condition.column}' 在数据中不存在")

            try:
                col_type = df[condition.column].dtype

                # Convert the comparison value to the column's numeric type when
                # needed. "contains" is a substring test, so the raw value is kept
                # as-is there — converting "5" to 5.0 would turn the pattern into
                # "5.0" and silently miss rows containing "5".
                converted_value = condition.value
                if condition.operator != "contains" and pd.api.types.is_numeric_dtype(col_type):
                    try:
                        converted_value = float(condition.value) if isinstance(condition.value, str) else condition.value
                    except (ValueError, TypeError):
                        raise HTTPException(status_code=400,
                                         detail=f"无法将值 '{condition.value}' 转换为 '{condition.column}' 列的数值类型")

                # "between" needs a second bound, converted the same way.
                converted_value2 = None
                if condition.operator == "between" and condition.value2 is not None:
                    if pd.api.types.is_numeric_dtype(col_type):
                        try:
                            converted_value2 = float(condition.value2) if isinstance(condition.value2, str) else condition.value2
                        except (ValueError, TypeError):
                            raise HTTPException(status_code=400,
                                            detail=f"无法将值 '{condition.value2}' 转换为 '{condition.column}' 列的数值类型")
                    else:
                        converted_value2 = condition.value2

                # Dispatch on the operator.
                if condition.operator == "eq":  # equal
                    df = df[df[condition.column] == converted_value]
                elif condition.operator == "gt":  # greater than
                    df = df[df[condition.column] > converted_value]
                elif condition.operator == "lt":  # less than
                    df = df[df[condition.column] < converted_value]
                elif condition.operator == "ge":  # greater or equal
                    df = df[df[condition.column] >= converted_value]
                elif condition.operator == "le":  # less or equal
                    df = df[df[condition.column] <= converted_value]
                elif condition.operator == "contains":
                    # Substring match on the stringified column; NaN rows excluded.
                    df = df[df[condition.column].astype(str).str.contains(str(converted_value), na=False)]
                elif condition.operator == "between":
                    if converted_value2 is not None:
                        df = df[(df[condition.column] >= converted_value) & (df[condition.column] <= converted_value2)]
                    else:
                        raise HTTPException(status_code=400, detail=f"between操作符需要提供value2参数")
                else:
                    raise HTTPException(status_code=400, detail=f"不支持的操作符: {condition.operator}")

            except HTTPException:
                raise
            except Exception as e:
                import traceback
                error_detail = traceback.format_exc()
                print(f"筛选可视化 - 筛选错误详情: {error_detail}")
                raise HTTPException(status_code=400, detail=f"筛选条件应用失败: {str(e)}")

        # Nothing survived the filters: return a warning instead of a chart.
        if df.empty:
            return HTMLResponse(content=f"<div class='alert alert-warning'>筛选后数据为空，无法生成图表</div>")

        # Columns that are entirely NaN after filtering cannot be plotted.
        if df[visualization.x_column].isna().all():
            raise HTTPException(status_code=400, detail=f"筛选后，X轴列 '{visualization.x_column}' 全为空值")

        if visualization.y_column and df[visualization.y_column].isna().all():
            raise HTTPException(status_code=400, detail=f"筛选后，Y轴列 '{visualization.y_column}' 全为空值")

        # Mark the title so users can tell the chart reflects filtered data.
        filtered_title = f"{visualization.title or ''} (已筛选)"

        # Render the requested chart type.
        try:
            if visualization.plot_type == "bar":
                img_base64 = handler.plot_bar(df, visualization.x_column, visualization.y_column, filtered_title,
                                         visualization.y_label or "数量")
            elif visualization.plot_type == "line":
                # Fall back to the single y_column when y_columns is not given.
                y_cols = visualization.y_columns or [visualization.y_column]
                img_base64 = handler.plot_line(df, visualization.x_column, y_cols, filtered_title)
            elif visualization.plot_type == "pie":
                img_base64 = handler.plot_pie(df, visualization.y_column, visualization.x_column, filtered_title)
            elif visualization.plot_type == "scatter":
                img_base64 = handler.plot_scatter(df, visualization.x_column, visualization.y_column, filtered_title)
            elif visualization.plot_type == "heatmap":
                img_base64 = handler.correlation_heatmap(df, f"{filtered_title or '相关性热力图'}")
            else:
                raise HTTPException(status_code=400, detail=f"不支持的图表类型: {visualization.plot_type}")
        except HTTPException:
            # Re-raise as-is so the "unsupported plot type" 400 above keeps its
            # own message instead of being rewrapped by the handler below.
            raise
        except Exception as e:
            # Capture the concrete plotting failure for the server log.
            import traceback
            error_detail = f"{str(e)}\n{traceback.format_exc()}"
            print(f"筛选可视化错误: {error_detail}")
            raise HTTPException(status_code=400, detail=f"生成筛选后的{visualization.plot_type}图表时出错: {str(e)}")

        # Wrap the base64 image into an HTML fragment and return it.
        html_img = handler.plot_to_html(img_base64)
        return HTMLResponse(content=html_img)
    except HTTPException:
        # Re-raise HTTP errors unchanged.
        raise
    except Exception as e:
        # Catch-all: log the traceback and surface a 500.
        import traceback
        error_detail = f"{str(e)}\n{traceback.format_exc()}"
        print(f"筛选可视化意外错误: {error_detail}")
        raise HTTPException(status_code=500, detail=f"筛选后可视化生成失败: {str(e)}")

@router_module_real.post("/export-filtered-data")
async def export_filtered_data(filter_req: FilterRequest, export_format: str = "xlsx"):
    """
    Export the filtered data to a downloadable file.

    Args:
        filter_req: source file path plus the list of filter conditions.
        export_format: "csv", "xls", or anything else for the default "xlsx".

    Returns:
        dict with success flag, original/filtered file paths, a download URL
        under /static/downloads/, the row count and the column list.

    Raises:
        HTTPException 400: missing column, bad value/operator, or load failure.
        HTTPException 500: file-write failure or any other unexpected error.
    """
    try:
        handler = spdf.DataFrameHandler()

        # Load the data file.
        try:
            df = handler.load_file(filter_req.file_path)
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"文件加载失败: {str(e)}")

        # Log dtypes for debugging.
        print("导出筛选数据 - 数据类型信息:")
        for col in df.columns:
            print(f"列 '{col}': {df[col].dtype}")

        # Apply each filter condition in turn, narrowing df.
        for condition in filter_req.conditions:
            if condition.column not in df.columns:
                raise HTTPException(status_code=400, detail=f"列 '{condition.column}' 在数据中不存在")

            try:
                col_type = df[condition.column].dtype

                # Convert the comparison value to the column's numeric type when
                # needed. "contains" is a substring test, so the raw value is kept
                # as-is there — converting "5" to 5.0 would turn the pattern into
                # "5.0" and silently miss rows containing "5".
                converted_value = condition.value
                if condition.operator != "contains" and pd.api.types.is_numeric_dtype(col_type):
                    try:
                        converted_value = float(condition.value) if isinstance(condition.value, str) else condition.value
                    except (ValueError, TypeError):
                        raise HTTPException(status_code=400,
                                         detail=f"无法将值 '{condition.value}' 转换为 '{condition.column}' 列的数值类型")

                # "between" needs a second bound, converted the same way.
                converted_value2 = None
                if condition.operator == "between" and condition.value2 is not None:
                    if pd.api.types.is_numeric_dtype(col_type):
                        try:
                            converted_value2 = float(condition.value2) if isinstance(condition.value2, str) else condition.value2
                        except (ValueError, TypeError):
                            raise HTTPException(status_code=400,
                                            detail=f"无法将值 '{condition.value2}' 转换为 '{condition.column}' 列的数值类型")
                    else:
                        converted_value2 = condition.value2

                # Dispatch on the operator.
                if condition.operator == "eq":  # equal
                    df = df[df[condition.column] == converted_value]
                elif condition.operator == "gt":  # greater than
                    df = df[df[condition.column] > converted_value]
                elif condition.operator == "lt":  # less than
                    df = df[df[condition.column] < converted_value]
                elif condition.operator == "ge":  # greater or equal
                    df = df[df[condition.column] >= converted_value]
                elif condition.operator == "le":  # less or equal
                    df = df[df[condition.column] <= converted_value]
                elif condition.operator == "contains":
                    # Substring match on the stringified column; NaN rows excluded.
                    df = df[df[condition.column].astype(str).str.contains(str(converted_value), na=False)]
                elif condition.operator == "between":
                    if converted_value2 is not None:
                        df = df[(df[condition.column] >= converted_value) & (df[condition.column] <= converted_value2)]
                    else:
                        raise HTTPException(status_code=400, detail=f"between操作符需要提供value2参数")
                else:
                    raise HTTPException(status_code=400, detail=f"不支持的操作符: {condition.operator}")

                print(f"导出筛选数据 - 应用条件后的行数: {len(df)}")

            except HTTPException:
                raise
            except Exception as e:
                import traceback
                error_detail = traceback.format_exc()
                print(f"导出筛选数据 - 错误详情: {error_detail}")
                raise HTTPException(status_code=400, detail=f"筛选条件应用失败: {str(e)}")

        # Build a unique output filename from the original name.
        original_filename = os.path.basename(filter_req.file_path)
        filename_without_ext, _ = os.path.splitext(original_filename)

        # Ensure the download directory exists before writing.
        os.makedirs(os.path.join("static", "downloads"), exist_ok=True)

        # Pick extension and writer based on the requested export format.
        if export_format.lower() == 'csv':
            new_filename = f"{filename_without_ext}_filtered_{random.randint(1000, 9999)}.csv"
            save_path = os.path.join("static", "downloads", new_filename)
            try:
                df.to_csv(save_path, index=False, encoding='utf-8')
            except Exception as e:
                import traceback
                error_detail = traceback.format_exc()
                print(f"CSV导出错误: {error_detail}")
                raise HTTPException(status_code=500, detail=f"导出CSV失败: {str(e)}")
        elif export_format.lower() == 'xls':
            new_filename = f"{filename_without_ext}_filtered_{random.randint(1000, 9999)}.xls"
            save_path = os.path.join("static", "downloads", new_filename)
            try:
                # NOTE(review): openpyxl only writes modern OOXML (.xlsx) content;
                # it cannot produce the legacy BIFF .xls format, so this file is
                # xlsx data with a .xls name — confirm whether true .xls is needed.
                df.to_excel(save_path, index=False, engine='openpyxl')
            except Exception as e:
                import traceback
                error_detail = traceback.format_exc()
                print(f"XLS导出错误: {error_detail}")
                raise HTTPException(status_code=500, detail=f"导出XLS失败: {str(e)}")
        else:  # default: xlsx
            new_filename = f"{filename_without_ext}_filtered_{random.randint(1000, 9999)}.xlsx"
            save_path = os.path.join("static", "downloads", new_filename)
            try:
                df.to_excel(save_path, index=False, engine='openpyxl')
            except Exception as e:
                import traceback
                error_detail = traceback.format_exc()
                print(f"XLSX导出错误: {error_detail}")
                raise HTTPException(status_code=500, detail=f"导出XLSX失败: {str(e)}")

        # URL served by the static-files mount.
        download_url = f"/static/downloads/{new_filename}"

        # Describe the exported file to the client.
        return {
            "success": True,
            "original_file": filter_req.file_path,
            "filtered_file": save_path,
            "download_url": download_url,
            "rows": len(df),
            "columns": df.columns.tolist()
        }
    except HTTPException:
        # Re-raise HTTP errors unchanged.
        raise
    except Exception as e:
        # Catch-all: log the traceback and surface a 500.
        import traceback
        error_detail = traceback.format_exc()
        print(f"导出筛选数据 - 异常详情: {error_detail}")
        raise HTTPException(status_code=500, detail=f"导出筛选数据失败: {str(e)}")