#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Data processing component
"""

import json
import re
import time
from typing import Dict, Any, Optional, List

import pandas as pd

from data_engine.models.data_models import Dataset, ChartAnalysis, ProcessingResult
from data_engine.config import ConfigManager
from data_engine.utils.code_executor import SafeCodeExecutor
from data_engine.utils.logger import engine_logger


class DataProcessor:
    """数据处理器"""
    
    def __init__(self, llm_client, safe_execution: bool = True):
        """
        Initialize the data processor.

        Args:
            llm_client: LiteLLM client instance used for LLM-backed code generation.
            safe_execution: Whether to enable safe execution mode.
                NOTE(review): the flag is stored but never read in the visible
                code — confirm it is consumed elsewhere (e.g. by the executor).
        """
        self.llm_client = llm_client
        self.code_executor = SafeCodeExecutor()  # sandboxed runner for generated pandas code
        self.safe_execution = safe_execution
        self.config_manager = ConfigManager()    # supplies prompt / processing templates
    
    async def process_data(self, dataset: Dataset, analysis: ChartAnalysis) -> ProcessingResult:
        """
        Process the dataset for charting — fast template path first, LLM fallback.

        Strategy:
          1. Smart-execution-order template processing (no LLM call, fastest).
          2. On failure, generate pandas code with the LLM and run it through
             the sandboxed code executor.

        Args:
            dataset: Input dataset wrapper; ``dataset.df`` is the DataFrame.
            analysis: Chart analysis describing filter/group/sort/limit intent.

        Returns:
            ProcessingResult: processed DataFrame, the processing code used,
            wall-clock execution time and success flag. On total failure the
            original DataFrame is returned unchanged with an error message.
        """
        engine_logger.info("开始数据处理")
        start_time = time.time()

        try:
            # First priority: smart-order template processing (fastest).
            result = self._process_with_templates(dataset, analysis)

            if result.success:
                execution_time = time.time() - start_time
                engine_logger.info(f"智能模板处理成功，耗时: {execution_time:.3f}秒")
                engine_logger.log_data_info(result.processed_df.shape, "处理后")

                # Re-point field mapping at the (possibly renamed) columns.
                self._update_field_mapping(analysis, result.processed_df)

                # Templates report 0.0; record the real elapsed time here.
                result.execution_time = execution_time
                return result

            engine_logger.warning(f"智能模板处理失败: {result.error_message}")

            # Second priority: LLM-generated code (slower but more flexible).
            engine_logger.info("尝试使用LLM生成代码方式...")
            processing_code = await self._generate_processing_code(dataset, analysis)

            # Executor's own timing is ignored; we report total elapsed time.
            success, processed_df, error_msg, _ = self.code_executor.execute_code(
                processing_code, dataset.df
            )
            total_execution_time = time.time() - start_time

            if success:
                engine_logger.info(f"LLM代码生成方法成功，总耗时: {total_execution_time:.3f}秒")
                engine_logger.log_data_info(processed_df.shape, "处理后")

                self._update_field_mapping(analysis, processed_df)

                return ProcessingResult(
                    processed_df=processed_df,
                    processing_code=processing_code,
                    execution_time=total_execution_time,
                    success=True
                )

            engine_logger.error(f"LLM方法也失败: {error_msg}")
            return ProcessingResult(
                processed_df=dataset.df,
                processing_code=processing_code,
                execution_time=total_execution_time,
                success=False,
                error_message=error_msg
            )

        except Exception as e:
            total_execution_time = time.time() - start_time
            engine_logger.error(f"数据处理异常: {str(e)}")
            return ProcessingResult(
                processed_df=dataset.df,
                processing_code="",
                execution_time=total_execution_time,
                success=False,
                error_message=str(e)
            )
    
    async def _generate_processing_code(self, dataset: Dataset, analysis: ChartAnalysis) -> str:
        """Produce pandas processing code: templates first, LLM as fallback."""
        try:
            # Cheap path: templates cover simple filter/group/sort/limit cases.
            from_template = self._generate_template_code(analysis)
            if from_template:
                return from_template

            # Complex cases are delegated to the LLM.
            return await self._generate_llm_code(dataset, analysis)

        except Exception as exc:
            engine_logger.warning(f"代码生成失败: {str(exc)}")
            return "# 处理失败，保持原数据\npass"
    
    async def _generate_llm_code(self, dataset: Dataset, analysis: ChartAnalysis) -> str:
        """Build a prompt from the configured template and ask the LLM for code."""
        reqs = self._build_requirements_list(analysis)
        frame = dataset.df

        prompt = self.config_manager.get_prompt_template("data_processing").format(
            data_shape=frame.shape,
            columns=list(frame.columns),
            dtypes=dict(frame.dtypes.astype(str)),
            processing_requirements="\n".join(f"- {r}" for r in reqs),
        )

        response = await self.llm_client.acompletion(
            messages=[{"role": "user", "content": prompt}]
        )

        raw = self.llm_client.get_response_content(response).strip()
        return self._clean_code_format(raw)
    
    def _build_requirements_list(self, analysis: ChartAnalysis) -> List[str]:
        """构建处理需求列表"""
        requirements = []
        
        # 添加筛选需求
        if analysis.filter_condition:
            requirements.append(f"筛选条件: {analysis.filter_condition}")
        
        # 添加分组聚合需求
        if analysis.group_by and analysis.aggregation:
            requirements.append(self._build_aggregation_requirement(analysis))
        
        # 添加排序需求
        if analysis.sort_by:
            order = "升序" if analysis.sort_order == "asc" else "降序"
            requirements.append(f"按 {analysis.sort_by} {order}排序")
        
        # 添加限制需求
        if analysis.limit:
            requirements.append(f"限制显示前 {analysis.limit} 行")
        
        # 添加字段保留需求
        requirements.extend(self._build_field_requirements(analysis))
        
        return requirements if requirements else ["保持原始数据结构"]
    
    def _build_aggregation_requirement(self, analysis: ChartAnalysis) -> str:
        """构建聚合需求"""
        group_field = analysis.group_by
        agg_func = analysis.aggregation
        
        # 确定聚合字段
        if analysis.y_fields:
            value_fields = ", ".join(analysis.y_fields)
        elif analysis.y_field:
            value_fields = analysis.y_field
        else:
            return f"按 {group_field} 分组进行 {agg_func} 聚合"
        
        return f"按 {group_field} 分组，对 {value_fields} 进行 {agg_func} 聚合"
    
    def _build_field_requirements(self, analysis: ChartAnalysis) -> List[str]:
        """构建字段保留需求"""
        requirements = []
        
        if analysis.chart_type == "pie":
            if analysis.name_field and analysis.value_field:
                requirements.append(f"确保包含 {analysis.name_field} 和 {analysis.value_field} 字段")
        
        elif analysis.chart_type in ["line", "bar", "scatter", "area"]:
            if analysis.x_field and (analysis.y_field or analysis.y_fields):
                y_info = analysis.y_field if analysis.y_field else ", ".join(analysis.y_fields)
                requirements.append(f"确保包含 {analysis.x_field} 和 {y_info} 字段")
        
        return requirements
    
    def _generate_template_code(self, analysis: ChartAnalysis) -> str:
        """Assemble processing code from configured templates; "" when none apply."""
        steps: List[str] = []

        # Filter step
        if analysis.filter_condition:
            filt = self.config_manager.get_processing_template("filter")
            steps.append(filt.format(condition=analysis.filter_condition))

        # Group/aggregate step
        if analysis.aggregation:
            x = analysis.x_field
            time_axis = bool(
                analysis.chart_type == "line" and x
                and any(tok in x.lower() for tok in ("month", "date", "time"))
            )
            if analysis.group_by:
                # Explicit grouping column present.
                steps.append(self._get_aggregation_code(analysis))
            elif time_axis:
                # Time series without extra grouping: aggregate over the time axis.
                if analysis.y_field:
                    steps.append(
                        f"df = df.groupby('{x}')['{analysis.y_field}'].{analysis.aggregation}().reset_index()"
                    )
                elif analysis.y_fields:
                    steps.append(
                        f"df = df.groupby('{x}')[{analysis.y_fields}].{analysis.aggregation}().reset_index()"
                    )

        # Sort step
        if analysis.sort_by:
            key = "sort_asc" if analysis.sort_order == "asc" else "sort_desc"
            steps.append(self.config_manager.get_processing_template(key).format(field=analysis.sort_by))

        # Row-limit step
        if analysis.limit:
            steps.append(self.config_manager.get_processing_template("limit").format(limit=analysis.limit))

        return "\n".join(steps) if steps else ""
    
    def _extract_filter_field(self, filter_condition: str) -> str:
        """从筛选条件中提取字段名"""
        import re
        # 简单的字段名提取，匹配 field_name > value 或 field_name < value 等模式
        match = re.match(r'(\w+)\s*[><=!]+', filter_condition)
        if match:
            return match.group(1)
        return None
    
    def _adjust_filter_condition(self, filter_condition: str, agg_field_used: str) -> str:
        """调整筛选条件以适应聚合后的字段名"""
        if not agg_field_used:
            return filter_condition
        
        # 如果筛选条件中的字段名与聚合字段匹配，保持不变
        # 这里可以根据需要添加更复杂的逻辑
        return filter_condition
    
    def _determine_sort_field(self, sort_by: str, available_columns: list, agg_field_used: str) -> str:
        """智能确定排序字段"""
        # 首先检查原字段名是否存在
        if sort_by in available_columns:
            return sort_by
        
        # 如果原字段不存在，检查是否是聚合后的字段
        if agg_field_used and agg_field_used in available_columns:
            return agg_field_used
        
        # 尝试查找相似的字段
        for col in available_columns:
            if sort_by.lower() in col.lower() or col.lower() in sort_by.lower():
                return col
        
        # 如果都没找到，检查是否有分组字段可以排序
        group_fields = ['department', 'category', 'region', 'type', 'group']
        for col in available_columns:
            if any(group_field in col.lower() for group_field in group_fields):
                return col
        
        # 最后返回第一个可用字段（优先数值字段）
        numeric_cols = []
        text_cols = []
        for col in available_columns:
            # 简单判断是否可能是数值字段
            if any(keyword in col.lower() for keyword in ['count', 'sum', 'avg', 'mean', 'total', 'amount', 'value', 'revenue', 'sales', 'price']):
                numeric_cols.append(col)
            else:
                text_cols.append(col)
        
        # 优先返回数值字段
        if numeric_cols:
            return numeric_cols[0]
        elif text_cols:
            return text_cols[0]
        
        return None
    
    def _needs_data_copy(self, analysis: ChartAnalysis) -> bool:
        """判断是否需要复制数据 - 性能优化"""
        # 如果有分组聚合操作，需要复制数据
        if analysis.group_by and analysis.aggregation:
            return True
        
        # 如果有复杂的筛选条件，需要复制数据
        if analysis.filter_condition and ('>' in analysis.filter_condition or '<' in analysis.filter_condition):
            return True
        
        # 简单的排序和限制操作可以不复制数据
        if only_sort_and_limit := (analysis.sort_by and analysis.limit and not analysis.filter_condition and not analysis.group_by):
            return False
        
        # 默认复制数据以确保安全
        return True
    
    def _select_aggregation_field(self, analysis: ChartAnalysis, df) -> str:
        """智能选择聚合字段 - 性能优化版本"""
        # 优先使用明确指定的y_field
        if analysis.y_field and analysis.y_field in df.columns:
            return analysis.y_field
        
        # 其次使用y_fields中的第一个可用字段
        if analysis.y_fields:
            for field in analysis.y_fields:
                if field in df.columns:
                    return field
        
        # 最后自动选择数值字段
        numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
        # 排除分组字段
        available_cols = [col for col in numeric_cols if col != analysis.group_by]
        return available_cols[0] if available_cols else None
    
    def _get_aggregation_code(self, analysis: ChartAnalysis) -> str:
        """生成聚合代码"""
        # 修复：处理时间序列的多重分组
        if (analysis.chart_type == "line" and analysis.x_field and 
            analysis.group_by and analysis.group_by != analysis.x_field and
            ("month" in analysis.x_field.lower() or "date" in analysis.x_field.lower() or "time" in analysis.x_field.lower())):
            # 时间序列多重分组：[时间字段, 其他分组字段]
            group_fields = [analysis.x_field, analysis.group_by]
            agg_func = analysis.aggregation
            
            if analysis.y_field:
                return f"df = df.groupby({group_fields})['{analysis.y_field}'].{agg_func}().reset_index()"
            else:
                # 默认计数聚合
                return f"df = df.groupby({group_fields}).size().reset_index(name='count')"
        else:
            # 常规单字段分组
            group_field = analysis.group_by
            agg_func = analysis.aggregation
            
            # 智能选择聚合字段
            if analysis.y_field:
                # 有明确指定的y_field
                template = self.config_manager.get_processing_template(f"group_{agg_func}")
                return template.format(group_field=group_field, value_field=analysis.y_field)
            elif analysis.y_fields:
                # 有多个y_fields
                agg_fields = analysis.y_fields
                return f"df = df.groupby('{group_field}')[{agg_fields}].{agg_func}().reset_index()"
            else:
                # 没有明确指定聚合字段，需要智能选择
                if agg_func == 'count':
                    # 计数聚合不需要特定字段
                    template = self.config_manager.get_processing_template("group_count")
                    return template.format(group_field=group_field)
                else:
                    # 其他聚合函数需要数值字段，智能选择第一个数值字段
                    # 这里假设sales_amount是数值字段，实际应该动态检测
                    return f"df = df.groupby('{group_field}')['sales_amount'].{agg_func}().reset_index()"
    
    def _clean_code_format(self, code: str) -> str:
        """清理代码格式"""
        if code.startswith('```python'):
            code = code.replace('```python', '').replace('```', '').strip()
        elif code.startswith('```'):
            code = code.replace('```', '').strip()
        return code
    
    def _process_with_templates(self, dataset: Dataset, analysis: ChartAnalysis) -> ProcessingResult:
        """Process the data via the smart-execution-order template pipeline.

        Runs filter/group/sort/limit directly on the DataFrame — following
        ``analysis.execution_order`` when one was recognized, otherwise a fixed
        default order — while recording the equivalent pandas code string.
        Returns a failed ProcessingResult (original DataFrame, error message)
        on any exception; ``execution_time`` is left at 0.0 for the caller
        (process_data) to fill in.
        """
        try:
            # Perf: only copy the source frame when a step may mutate/reshape it;
            # read-only pipelines (simple sort/limit) work on the original.
            needs_copy = self._needs_data_copy(analysis)
            df = dataset.df.copy() if needs_copy else dataset.df
            code_parts = []

            # Perf: emit the step-recognition dump only at DEBUG level.
            if engine_logger.logger.level <= 10:  # DEBUG level
                engine_logger.debug("\n===== 数据处理步骤识别结果 =====")
                engine_logger.debug(f"分组字段: {analysis.group_by}")
                engine_logger.debug(f"聚合方法: {analysis.aggregation}")
                engine_logger.debug(f"筛选条件: {analysis.filter_condition}")
                engine_logger.debug(f"排序字段: {analysis.sort_by} ({analysis.sort_order})")
                engine_logger.debug(f"限制行数: {analysis.limit}")
                engine_logger.debug(f"执行顺序: {analysis.execution_order}")
                engine_logger.debug("==============================\n")

            # Perf: execution-start dump, also DEBUG-gated.
            if engine_logger.logger.level <= 10:  # DEBUG level
                engine_logger.debug("===== 开始执行数据处理步骤 =====")
                engine_logger.debug(f"原始数据形状: {df.shape}")

            # Prefer the LLM-recognized execution order (more efficient).
            if analysis.execution_order and len(analysis.execution_order) > 0:
                df, code_parts = self._execute_smart_order(df, analysis, code_parts)
            else:
                # Fall back to the fixed default order.
                engine_logger.debug("未识别到智能执行顺序，使用默认顺序")
                df, code_parts = self._execute_default_order(df, analysis, code_parts)

            # De-duplicate generated code lines so no step is emitted twice.
            unique_code_parts = []
            seen = set()
            for code in code_parts:
                if code not in seen:
                    unique_code_parts.append(code)
                    seen.add(code)

            processing_code = "\n".join(unique_code_parts) if unique_code_parts else "# 保持原始数据"

            if engine_logger.logger.level <= 10:  # DEBUG level
                engine_logger.debug("\n===== 数据处理完成 =====")
                engine_logger.debug(f"最终数据形状: {df.shape}")
                engine_logger.debug(f"生成的处理代码:")
                engine_logger.debug(processing_code)
                engine_logger.debug("========================\n")

            return ProcessingResult(
                processed_df=df,
                processing_code=processing_code,
                execution_time=0.0,
                success=True
            )

        except Exception as e:
            return ProcessingResult(
                processed_df=dataset.df,
                processing_code="",
                execution_time=0.0,
                success=False,
                error_message=f"模板处理失败: {str(e)}"
            )
    
    def validate_processing_result(self, original_df: pd.DataFrame,
                                   processed_df: pd.DataFrame) -> Dict[str, Any]:
        """
        Sanity-check a processing result against the original data.

        Flags empty output and suspicious row-count changes, and reports
        row/column statistics.

        Args:
            original_df: DataFrame before processing.
            processed_df: DataFrame after processing.

        Returns:
            Dict with ``is_valid`` (bool), ``warnings`` (list of str) and
            ``statistics`` (row/column counts plus the row change ratio).
        """
        warnings: List[str] = []
        is_valid = True

        # An empty result is never usable for charting.
        if processed_df.empty:
            warnings.append("处理后数据为空")
            is_valid = False

        # Compare row counts before and after processing.
        rows_before = len(original_df)
        rows_after = len(processed_df)
        ratio = rows_after / rows_before if rows_before > 0 else 0

        if ratio < 0.01:
            warnings.append("处理后数据行数过少，可能过度筛选")
        elif ratio > 10:
            warnings.append("处理后数据行数过多，可能存在重复")

        return {
            "is_valid": is_valid,
            "warnings": warnings,
            "statistics": {
                "original_rows": rows_before,
                "processed_rows": rows_after,
                "row_change_ratio": ratio,
                "original_columns": len(original_df.columns),
                "processed_columns": len(processed_df.columns),
            },
        }
    
    def _execute_default_order(self, df, analysis, code_parts):
        """Execute the default processing order:
        pre-filter -> group/aggregate -> post-filter -> sort -> limit.

        Mutates ``df`` step by step and appends the equivalent pandas code to
        ``code_parts``. Each step is best-effort: failures are logged and the
        pipeline continues with the data as-is.

        Args:
            df: The working DataFrame (already copied by the caller if needed).
            analysis: Chart analysis describing the operations.
            code_parts: List accumulating the generated code lines.

        Returns:
            Tuple of (processed DataFrame, updated code_parts).
        """
        engine_logger.debug("\n使用默认执行顺序: 预筛选 -> 分组聚合 -> 后筛选 -> 排序 -> 限制行数")

        # 1. Pre-filter (only when safe to apply before aggregation).
        engine_logger.debug("\n步骤1: 检查是否可以进行预筛选...")
        pre_filter_applied = False
        if analysis.filter_condition and analysis.group_by and analysis.aggregation:
            # Check whether the condition can run before aggregation.
            filter_field = self._extract_filter_field(analysis.filter_condition)
            # The filter must run AFTER aggregation when its field is the
            # aggregated value: explicitly (y_field / y_fields) or implicitly
            # (numeric field with no explicit y and a non-count aggregation).
            is_agg_field = (filter_field == analysis.y_field or 
                           (analysis.y_fields and filter_field in analysis.y_fields) or
                           (not analysis.y_field and not analysis.y_fields and filter_field in df.select_dtypes(include=['number']).columns and analysis.aggregation != 'count'))
            
            if filter_field and filter_field in df.columns and filter_field != analysis.group_by and not is_agg_field:
                # Safe: the field is neither the group key nor the aggregate.
                engine_logger.debug(f"  可以进行预筛选，筛选字段: {filter_field}")
                try:
                    df = df.query(analysis.filter_condition)
                    code_parts.append(f"df = df.query('{analysis.filter_condition}')")
                    pre_filter_applied = True
                    engine_logger.debug(f"  预筛选完成，数据形状: {df.shape}")
                except Exception as e:
                    engine_logger.debug(f"  预筛选失败: {str(e)}")
                    engine_logger.warning(f"预筛选失败: {str(e)}")
            else:
                engine_logger.debug(f"  无法进行预筛选，筛选字段: {filter_field}, 是否为聚合字段: {is_agg_field}")
        
        # 2. Group/aggregate.
        engine_logger.debug("\n步骤2: 执行分组聚合...")
        agg_field_used = None
        if analysis.group_by and analysis.aggregation:
            try:
                if analysis.aggregation in ['sum', 'mean', 'count', 'max', 'min']:
                    # Choose the aggregation field: y_field, then first valid
                    # y_fields entry, then first numeric column (excluding the
                    # group key).
                    agg_field = None
                    if analysis.y_field and analysis.y_field in df.columns:
                        agg_field = analysis.y_field
                    elif analysis.y_fields:
                        agg_fields = [f for f in analysis.y_fields if f in df.columns]
                        if agg_fields:
                            agg_field = agg_fields[0]
                    else:
                        # Auto-pick a numeric column.
                        numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
                        numeric_cols = [col for col in numeric_cols if col != analysis.group_by]
                        if numeric_cols:
                            agg_field = numeric_cols[0]
                    
                    if agg_field:
                        agg_field_used = agg_field
                        engine_logger.debug(f"  选择聚合字段: {agg_field}")
                        engine_logger.debug(f"  聚合方法: {analysis.aggregation}")
                        # Run the group/aggregate and record the code line.
                        if analysis.aggregation == 'mean':
                            df = df.groupby(analysis.group_by)[agg_field].mean().reset_index()
                            code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].mean().reset_index()")
                            engine_logger.debug(f"  执行平均值聚合，结果形状: {df.shape}")
                        elif analysis.aggregation == 'sum':
                            df = df.groupby(analysis.group_by)[agg_field].sum().reset_index()
                            code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].sum().reset_index()")
                            engine_logger.debug(f"  执行求和聚合，结果形状: {df.shape}")
                        elif analysis.aggregation == 'max':
                            df = df.groupby(analysis.group_by)[agg_field].max().reset_index()
                            code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].max().reset_index()")
                            engine_logger.debug(f"  执行最大值聚合，结果形状: {df.shape}")
                        elif analysis.aggregation == 'min':
                            df = df.groupby(analysis.group_by)[agg_field].min().reset_index()
                            code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].min().reset_index()")
                            engine_logger.debug(f"  执行最小值聚合，结果形状: {df.shape}")
                        elif analysis.aggregation == 'count':
                            df = df.groupby(analysis.group_by).size().reset_index(name='count')
                            code_parts.append(f"df = df.groupby('{analysis.group_by}').size().reset_index(name='count')")
                            agg_field_used = 'count'
                            engine_logger.debug(f"  执行计数聚合，结果形状: {df.shape}")
                    else:
                        # No usable aggregation field: fall back to a row count.
                        engine_logger.debug("  未找到合适的聚合字段，使用计数聚合")
                        df = df.groupby(analysis.group_by).size().reset_index(name='count')
                        code_parts.append(f"df = df.groupby('{analysis.group_by}').size().reset_index(name='count')")
                        agg_field_used = 'count'
                        engine_logger.debug(f"  执行计数聚合，结果形状: {df.shape}")
                        
            except Exception as e:
                engine_logger.warning(f"分组聚合失败: {str(e)}")
        
        # 3. Post-filter (when the condition depends on the aggregate, or the
        # pre-filter did not run).
        engine_logger.debug("\n步骤3: 检查是否需要后筛选...")
        if analysis.filter_condition and not pre_filter_applied:
            engine_logger.debug(f"  需要进行后筛选，原始条件: {analysis.filter_condition}")
            try:
                # Adjust the condition if the field was renamed by aggregation.
                adjusted_condition = self._adjust_filter_condition(analysis.filter_condition, agg_field_used)
                engine_logger.debug(f"  调整后的筛选条件: {adjusted_condition}")
                df = df.query(adjusted_condition)
                code_parts.append(f"df = df.query('{adjusted_condition}')")
                engine_logger.debug(f"  后筛选完成，结果形状: {df.shape}")
            except Exception as e:
                engine_logger.debug(f"  后筛选失败: {str(e)}")
                engine_logger.warning(f"后筛选失败: {str(e)}")
        else:
            engine_logger.debug("  无需后筛选（已进行预筛选或无筛选条件）")
        
        # 4. Sort.
        engine_logger.debug("\n步骤4: 检查是否需要排序...")
        if analysis.sort_by:
            engine_logger.debug(f"  需要排序，排序字段: {analysis.sort_by}, 排序方向: {analysis.sort_order}")
            try:
                # Resolve the sort field against the post-aggregation columns.
                engine_logger.debug(f"  原始排序字段: {analysis.sort_by}")
                engine_logger.debug(f"  当前可用字段: {df.columns.tolist()}")
                engine_logger.debug(f"  聚合字段: {agg_field_used}")
                
                sort_field = self._determine_sort_field(analysis.sort_by, df.columns.tolist(), agg_field_used)
                engine_logger.debug(f"  智能确定的排序字段: {sort_field}")
                
                if sort_field and sort_field in df.columns:
                    ascending = analysis.sort_order == "asc"
                    df = df.sort_values(sort_field, ascending=ascending)
                    code_parts.append(f"df = df.sort_values('{sort_field}', ascending={ascending})")
                    engine_logger.debug(f"  排序完成，使用字段: {sort_field}，结果形状: {df.shape}")
                else:
                    engine_logger.debug(f"  无法确定有效的排序字段")
                    engine_logger.debug(f"  原始排序字段 '{analysis.sort_by}' 不存在")
                    engine_logger.debug(f"  可用字段: {df.columns.tolist()}")
                    engine_logger.warning(f"排序失败：无法找到有效的排序字段。原始字段: {analysis.sort_by}, 可用字段: {df.columns.tolist()}")
            except Exception as e:
                engine_logger.debug(f"  排序失败: {str(e)}")
                engine_logger.warning(f"排序失败: {str(e)}")
        else:
            engine_logger.debug("  无需排序")
        
        # 5. Row limit.
        engine_logger.debug("\n步骤5: 检查是否需要限制行数...")
        if analysis.limit and analysis.limit > 0:
            engine_logger.debug(f"  需要限制行数: {analysis.limit}")
            df = df.head(analysis.limit)
            code_parts.append(f"df = df.head({analysis.limit})")
            engine_logger.debug(f"  限制行数完成，最终形状: {df.shape}")
        else:
            engine_logger.debug("  无需限制行数")
            
        return df, code_parts
    
    def _update_field_mapping(self, analysis: ChartAnalysis, processed_df) -> None:
        """Remap the analysis' field references onto the processed DataFrame.

        After aggregation or renaming, fields chosen during analysis may no
        longer exist; this mutates ``analysis`` in place so that chart
        rendering uses columns that are actually present.
        """
        available_fields = list(processed_df.columns)
        engine_logger.info(f"更新字段映射，可用字段: {available_fields}")

        def first_categorical():
            # First object/category-typed column — candidate x axis.
            for field in available_fields:
                if processed_df[field].dtype in ['object', 'category']:
                    return field
            return None

        def first_numeric():
            # First numeric column — candidate y / value field.
            for field in available_fields:
                if processed_df[field].dtype in ['int64', 'float64', 'int32', 'float32']:
                    return field
            return None

        # x axis: prefer the group-by column, else the first categorical column.
        if analysis.x_field and analysis.x_field not in available_fields:
            if analysis.group_by and analysis.group_by in available_fields:
                old_x = analysis.x_field
                analysis.x_field = analysis.group_by
                engine_logger.info(f"x_field从 '{old_x}' 更新为 '{analysis.x_field}'")
            else:
                replacement = first_categorical()
                if replacement is not None:
                    old_x = analysis.x_field
                    analysis.x_field = replacement
                    engine_logger.info(f"x_field从 '{old_x}' 更新为 '{analysis.x_field}'")

        # y axis: fall back to the first numeric column.
        if analysis.y_field and analysis.y_field not in available_fields:
            replacement = first_numeric()
            if replacement is not None:
                old_y = analysis.y_field
                analysis.y_field = replacement
                engine_logger.info(f"y_field从 '{old_y}' 更新为 '{analysis.y_field}'")

        # Pie-chart fields: name follows the group-by column.
        if analysis.name_field and analysis.name_field not in available_fields:
            if analysis.group_by and analysis.group_by in available_fields:
                old_name = analysis.name_field
                analysis.name_field = analysis.group_by
                engine_logger.info(f"name_field从 '{old_name}' 更新为 '{analysis.name_field}'")

        # Pie-chart value: first numeric column.
        if analysis.value_field and analysis.value_field not in available_fields:
            replacement = first_numeric()
            if replacement is not None:
                old_value = analysis.value_field
                analysis.value_field = replacement
                engine_logger.info(f"value_field从 '{old_value}' 更新为 '{analysis.value_field}'")

        engine_logger.info(f"字段映射更新完成: x_field={analysis.x_field}, y_field={analysis.y_field}, name_field={analysis.name_field}, value_field={analysis.value_field}")
    
    def _execute_smart_order(self, df, analysis, code_parts):
        """根据LLM识别的执行顺序智能执行数据处理步骤 - 性能优化版本"""
        if engine_logger.logger.level <= 10:  # DEBUG level
            engine_logger.debug(f"\n使用智能执行顺序: {' -> '.join(analysis.execution_order)}")
        
        # State tracked across steps: the value column produced by aggregation
        # (if any), and whether the filter condition has already been applied.
        agg_field_used: Optional[str] = None
        filter_applied: bool = False
        
        # Perf: precompute which operations are requested so the per-step
        # branches below don't repeatedly evaluate the same expressions.
        has_filter = bool(analysis.filter_condition)
        has_aggregation = bool(analysis.group_by and analysis.aggregation)
        has_sort = bool(analysis.sort_by)
        # NOTE(review): has_sort and has_limit are computed here but the sort
        # and limit branches below re-check the analysis fields directly.
        has_limit = bool(analysis.limit and analysis.limit > 0)
        
        # Execute the processing steps in the order specified by the LLM.
        for i, step in enumerate(analysis.execution_order):
            if engine_logger.logger.level <= 10:  # DEBUG level (logging.DEBUG == 10)
                engine_logger.debug(f"\n步骤{i+1}: 执行 {step}...")
            
            if (step == "filter" or step == "post_filter") and not filter_applied and has_filter:
                # Apply the filter; "filter" and "post_filter" share one branch,
                # so whichever appears first in execution_order wins.
                if engine_logger.logger.level <= 10:  # DEBUG level
                    engine_logger.debug(f"  执行筛选，条件: {analysis.filter_condition}")
                try:
                    # Rewrite the condition when aggregation has already replaced
                    # the original value column (see _adjust_filter_condition).
                    adjusted_condition = (self._adjust_filter_condition(analysis.filter_condition, agg_field_used) 
                                        if agg_field_used else analysis.filter_condition)
                    if engine_logger.logger.level <= 10:  # DEBUG level
                        engine_logger.debug(f"  调整后的筛选条件: {adjusted_condition}")
                    df = df.query(adjusted_condition)
                    code_parts.append(f"df = df.query('{adjusted_condition}')")
                    filter_applied = True
                    if engine_logger.logger.level <= 10:  # DEBUG level
                        engine_logger.debug(f"  筛选后数据形状: {df.shape}")
                except Exception as e:
                    # Filter failure is non-fatal; filter_applied stays False,
                    # so the fallback at the bottom of this method retries it.
                    if engine_logger.logger.level <= 10:  # DEBUG level
                        engine_logger.debug(f"  筛选失败: {str(e)}")
                    engine_logger.warning(f"筛选失败: {str(e)}")
            elif (step == "filter" or step == "post_filter") and not has_filter:
                if engine_logger.logger.level <= 10:  # DEBUG level
                    engine_logger.debug("  无筛选条件，跳过")
            elif (step == "filter" or step == "post_filter") and filter_applied:
                if engine_logger.logger.level <= 10:  # DEBUG level
                    engine_logger.debug("  筛选已在前面执行，跳过")
                    
            elif step == "group_aggregate" and has_aggregation:
                # Perform the group-by aggregation; records the produced value
                # column in agg_field_used for later filter/sort adjustment.
                try:
                    if analysis.aggregation in ['sum', 'mean', 'count', 'max', 'min']:
                        # Perf: let the helper pick a suitable numeric field;
                        # it may return None when nothing fits.
                        agg_field = self._select_aggregation_field(analysis, df)
                        
                        if agg_field:
                            agg_field_used = agg_field
                            if engine_logger.logger.level <= 10:  # DEBUG level
                                engine_logger.debug(f"  选择聚合字段: {agg_field}")
                                engine_logger.debug(f"  聚合方法: {analysis.aggregation}")
                            # Dispatch on the aggregation method; each branch also
                            # records the equivalent pandas code for reproducibility.
                            if analysis.aggregation == 'mean':
                                df = df.groupby(analysis.group_by)[agg_field].mean().reset_index()
                                code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].mean().reset_index()")
                                if engine_logger.logger.level <= 10:  # DEBUG level
                                    engine_logger.debug(f"  执行平均值聚合，结果形状: {df.shape}")
                            elif analysis.aggregation == 'sum':
                                df = df.groupby(analysis.group_by)[agg_field].sum().reset_index()
                                code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].sum().reset_index()")
                                if engine_logger.logger.level <= 10:  # DEBUG level
                                    engine_logger.debug(f"  执行求和聚合，结果形状: {df.shape}")
                            elif analysis.aggregation == 'max':
                                df = df.groupby(analysis.group_by)[agg_field].max().reset_index()
                                code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].max().reset_index()")
                                if engine_logger.logger.level <= 10:  # DEBUG level
                                    engine_logger.debug(f"  执行最大值聚合，结果形状: {df.shape}")
                            elif analysis.aggregation == 'min':
                                df = df.groupby(analysis.group_by)[agg_field].min().reset_index()
                                code_parts.append(f"df = df.groupby('{analysis.group_by}')['{agg_field}'].min().reset_index()")
                                if engine_logger.logger.level <= 10:  # DEBUG level
                                    engine_logger.debug(f"  执行最小值聚合，结果形状: {df.shape}")
                            elif analysis.aggregation == 'count':
                                # count ignores the selected field: group sizes only.
                                df = df.groupby(analysis.group_by).size().reset_index(name='count')
                                code_parts.append(f"df = df.groupby('{analysis.group_by}').size().reset_index(name='count')")
                                agg_field_used = 'count'
                                if engine_logger.logger.level <= 10:  # DEBUG level
                                    engine_logger.debug(f"  执行计数聚合，结果形状: {df.shape}")
                        else:
                            # No suitable aggregation field found — fall back to
                            # counting rows per group.
                            if engine_logger.logger.level <= 10:  # DEBUG level
                                engine_logger.debug("  未找到合适的聚合字段，使用计数聚合")
                            df = df.groupby(analysis.group_by).size().reset_index(name='count')
                            code_parts.append(f"df = df.groupby('{analysis.group_by}').size().reset_index(name='count')")
                            agg_field_used = 'count'
                            if engine_logger.logger.level <= 10:  # DEBUG level
                                engine_logger.debug(f"  执行计数聚合，结果形状: {df.shape}")
                                
                except Exception as e:
                    # Aggregation failure is non-fatal; df keeps its pre-step state.
                    engine_logger.warning(f"分组聚合失败: {str(e)}")
            elif step == "group_aggregate" and not has_aggregation:
                if engine_logger.logger.level <= 10:  # DEBUG level
                    engine_logger.debug("  无分组聚合需求，跳过")
                    
            elif step == "sort":
                # Sort step: the requested column may have been renamed or dropped
                # by aggregation, so resolve the actual column first.
                if analysis.sort_by:
                    engine_logger.debug(f"  需要排序，排序字段: {analysis.sort_by}, 排序方向: {analysis.sort_order}")
                    try:
                        # Resolve the sort column against the current frame
                        # (see _determine_sort_field).
                        engine_logger.debug(f"  原始排序字段: {analysis.sort_by}")
                        engine_logger.debug(f"  当前可用字段: {df.columns.tolist()}")
                        engine_logger.debug(f"  聚合字段: {agg_field_used}")
                        
                        sort_field = self._determine_sort_field(analysis.sort_by, df.columns.tolist(), agg_field_used)
                        engine_logger.debug(f"  智能确定的排序字段: {sort_field}")
                        
                        if sort_field and sort_field in df.columns:
                            # Any value other than "asc" sorts descending.
                            ascending = analysis.sort_order == "asc"
                            df = df.sort_values(sort_field, ascending=ascending)
                            code_parts.append(f"df = df.sort_values('{sort_field}', ascending={ascending})")
                            engine_logger.debug(f"  排序完成，使用字段: {sort_field}，结果形状: {df.shape}")
                        else:
                            engine_logger.debug(f"  无法确定有效的排序字段")
                            engine_logger.debug(f"  原始排序字段 '{analysis.sort_by}' 不存在")
                            engine_logger.debug(f"  可用字段: {df.columns.tolist()}")
                            engine_logger.warning(f"排序失败：无法找到有效的排序字段。原始字段: {analysis.sort_by}, 可用字段: {df.columns.tolist()}")
                    except Exception as e:
                        engine_logger.debug(f"  排序失败: {str(e)}")
                        engine_logger.warning(f"排序失败: {str(e)}")
                else:
                    engine_logger.debug("  无排序需求，跳过")
                    
            elif step == "limit":
                # Row-limit step (e.g. top-N after sorting).
                if analysis.limit and analysis.limit > 0:
                    engine_logger.debug(f"  需要限制行数: {analysis.limit}")
                    df = df.head(analysis.limit)
                    code_parts.append(f"df = df.head({analysis.limit})")
                    engine_logger.debug(f"  限制行数完成，最终形状: {df.shape}")
                else:
                    engine_logger.debug("  无行数限制需求，跳过")
                    
            else:
                # Unknown step names from the LLM are ignored rather than raising.
                engine_logger.debug(f"  未知步骤: {step}，跳过")
        
        # Fallback: if a filter condition exists but was never applied (either
        # execution_order lacked a filter step, or the in-loop attempt raised),
        # apply it once more here before returning.
        if analysis.filter_condition and not filter_applied:
            engine_logger.debug("\n补充执行: 筛选条件未在指定顺序中执行，现在执行...")
            try:
                adjusted_condition = self._adjust_filter_condition(analysis.filter_condition, agg_field_used)
                engine_logger.debug(f"  调整后的筛选条件: {adjusted_condition}")
                df = df.query(adjusted_condition)
                code_parts.append(f"df = df.query('{adjusted_condition}')")
                engine_logger.debug(f"  补充筛选完成，结果形状: {df.shape}")
            except Exception as e:
                engine_logger.debug(f"  补充筛选失败: {str(e)}")
                engine_logger.warning(f"补充筛选失败: {str(e)}")
                
        # Returns the processed frame plus the pandas statements that reproduce it.
        return df, code_parts