import pandas as pd
import json
import re
from typing import Dict, List, Any, Union, Optional
from pathlib import Path
import openpyxl
from openpyxl.utils import get_column_letter, column_index_from_string
from django.core.files.uploadedfile import UploadedFile
from .models import Template, ProcessingRule, DataSourceConfig, OutputConfig, FieldConfig


class TemplateProcessor:
    """Template processing engine.

    Drives the full pipeline for one ``Template``:

    1. load the uploaded files declared as data sources,
    2. execute the template's processing rules in ``rule_order``,
    3. render every configured output.

    All intermediate results live in memory on the instance
    (``data_sources`` / ``processed_data``), and every step appends a
    human-readable line to ``processing_log`` (log text is in Chinese to
    match the rest of the application).
    """

    def __init__(self, template: Template):
        self.template = template
        # source_name -> {'data': DataFrame, 'config': DataSourceConfig,
        #                 'original_data': untouched copy of the loaded frame}
        self.data_sources: Dict[str, Dict[str, Any]] = {}
        # result_name -> DataFrame produced by a processing rule
        self.processed_data: Dict[str, pd.DataFrame] = {}
        self.processing_log: List[str] = []

    def process_files(self, uploaded_files: Dict[str, UploadedFile]) -> Dict[str, Any]:
        """Run the full pipeline over the uploaded files.

        Args:
            uploaded_files: mapping of data-source name -> uploaded file.

        Returns:
            On success: ``{'success': True, 'results': ..., 'processing_log': ...,
            'summary': ...}``; on failure: ``{'success': False, 'error': ...,
            'processing_log': ...}``.  Exceptions never escape this method.
        """
        try:
            # Step 1: load the declared data sources from the uploads.
            self._load_data_sources(uploaded_files)

            # Step 2: execute the template's processing rules in order.
            self._execute_processing_rules()

            # Step 3: render the configured outputs.
            output_results = self._generate_outputs()

            return {
                'success': True,
                'results': output_results,
                'processing_log': self.processing_log,
                'summary': self._get_processing_summary()
            }

        except Exception as e:
            # Surface the error together with whatever log was produced so
            # far — callers rely on the dict shape, not on exceptions.
            return {
                'success': False,
                'error': str(e),
                'processing_log': self.processing_log
            }

    def _load_data_sources(self, uploaded_files: Dict[str, UploadedFile]):
        """Load every active data source that has a matching uploaded file.

        Sources with no matching upload are silently skipped (they may be
        optional).  Raises ``ValueError`` for unsupported source types.
        """
        data_source_configs = self.template.data_sources.filter(is_active=True).order_by('source_order')

        for config in data_source_configs:
            source_name = config.source_name
            if source_name in uploaded_files:
                file_obj = uploaded_files[source_name]

                # Dispatch on the configured source type.
                if config.source_type == 'excel':
                    data = self._load_excel_data(file_obj, config)
                elif config.source_type == 'csv':
                    data = self._load_csv_data(file_obj, config)
                else:
                    raise ValueError(f"不支持的数据源类型: {config.source_type}")

                self.data_sources[source_name] = {
                    'data': data,
                    'config': config,
                    # Keep a pristine copy so later rules can't corrupt it.
                    'original_data': data.copy()
                }

                self.processing_log.append(f"已加载数据源: {source_name}, 数据行数: {len(data)}")

    def _load_excel_data(self, file_obj: UploadedFile, config: DataSourceConfig) -> pd.DataFrame:
        """Load an Excel upload, dispatching on the template's layout type."""
        if self.template.template_type == 'tabular':
            return self._load_tabular_excel(file_obj, config)
        elif self.template.template_type == 'document':
            return self._load_document_excel(file_obj, config)
        else:
            return self._load_mixed_excel(file_obj, config)

    def _load_tabular_excel(self, file_obj: UploadedFile, config: DataSourceConfig) -> pd.DataFrame:
        """Load a table-style Excel sheet into a DataFrame.

        ``config.data_range`` (optional) restricts the read to a sheet /
        row window / column subset; ``config.header_config`` controls
        whether and where a header row is read.
        """
        data_range = config.data_range
        header_config = config.header_config

        if data_range:
            # A specific region of the workbook was configured.
            startrow = data_range.get('start_row', 0)
            nrows = data_range.get('row_count')
            usecols = data_range.get('columns')

            df = pd.read_excel(
                file_obj,
                sheet_name=data_range.get('sheet_name', 0),
                header=header_config.get('header_row', 0) if header_config.get('has_header', True) else None,
                skiprows=startrow,
                nrows=nrows,
                usecols=usecols
            )
        else:
            # No region configured: read the first sheet in full.
            df = pd.read_excel(
                file_obj,
                sheet_name=0,
                header=0 if header_config.get('has_header', True) else None
            )

        return df

    def _load_document_excel(self, file_obj: UploadedFile, config: DataSourceConfig) -> pd.DataFrame:
        """Load a document/form-style Excel sheet into a one-or-few-row frame.

        Depending on ``config.read_direction``:
          * ``horizontal`` — each row is one field (col A = name, col B = value);
          * ``vertical``   — each column is one field (row 1 = name);
          * ``custom``     — explicit cell addresses from
            ``data_range['custom_mapping']`` (field name -> cell, e.g. ``'B3'``).
        """
        workbook = openpyxl.load_workbook(file_obj)
        worksheet = workbook.active

        data_dict = {}

        if config.read_direction == 'horizontal':
            # Horizontal: first column holds the field name, second the value.
            # NOTE: rows whose first cell is falsy (empty, 0) are skipped.
            for row in worksheet.iter_rows(values_only=True):
                if row[0]:
                    field_name = str(row[0])
                    field_value = row[1] if len(row) > 1 else None
                    data_dict[field_name] = [field_value]

        elif config.read_direction == 'vertical':
            # Vertical: first row holds the field name, the rest the values.
            for col_idx, col in enumerate(worksheet.iter_cols(values_only=True), 1):
                if col[0]:
                    field_name = str(col[0])
                    field_values = [v for v in col[1:] if v is not None]
                    data_dict[field_name] = field_values if field_values else [None]

        elif config.read_direction == 'custom':
            # Custom: read individual cells at configured addresses.
            # data_range may legitimately be None/unset (see the tabular
            # loader), so guard before calling .get() on it.
            custom_config = (config.data_range or {}).get('custom_mapping', {})
            for field_name, cell_position in custom_config.items():
                cell_value = worksheet[cell_position].value
                data_dict[field_name] = [cell_value]

        # Pad shorter columns with None so the DataFrame is rectangular.
        max_length = max(len(v) for v in data_dict.values()) if data_dict else 1
        for key, values in data_dict.items():
            if len(values) < max_length:
                data_dict[key].extend([None] * (max_length - len(values)))

        return pd.DataFrame(data_dict)

    def _load_mixed_excel(self, file_obj: UploadedFile, config: DataSourceConfig) -> pd.DataFrame:
        """Load a mixed-layout Excel file.

        Currently delegates to the tabular loader; a combined
        tabular+document strategy can be plugged in here later.
        """
        return self._load_tabular_excel(file_obj, config)

    def _load_csv_data(self, file_obj: UploadedFile, config: DataSourceConfig) -> pd.DataFrame:
        """Load a CSV upload (utf-8-sig tolerates a leading BOM from Excel)."""
        header_config = config.header_config

        df = pd.read_csv(
            file_obj,
            header=0 if header_config.get('has_header', True) else None,
            encoding='utf-8-sig'
        )

        return df

    def _execute_processing_rules(self):
        """Execute every active processing rule in ``rule_order``.

        A failing rule is logged and then re-raised — processing does not
        continue past a broken rule.
        """
        processing_rules = self.template.processing_rules.filter(is_active=True).order_by('rule_order')

        for rule in processing_rules:
            try:
                self._execute_single_rule(rule)
                self.processing_log.append(f"已执行规则: {rule.rule_name}")
            except Exception as e:
                self.processing_log.append(f"规则执行失败: {rule.rule_name}, 错误: {str(e)}")
                raise

    def _execute_single_rule(self, rule: ProcessingRule):
        """Dispatch one rule to its handler based on ``rule_type``.

        Unknown rule types are logged (previously they were silently
        ignored, which made misconfigured templates hard to diagnose).
        """
        rule_type = rule.rule_type
        rule_config = rule.rule_config

        handlers = {
            'data_extraction': self._execute_data_extraction,
            'field_mapping': self._execute_field_mapping,
            'data_transformation': self._execute_data_transformation,
            'data_validation': self._execute_data_validation,
            'data_aggregation': self._execute_data_aggregation,
            'file_association': self._execute_file_association,
            'output_generation': self._execute_output_generation,
        }
        handler = handlers.get(rule_type)
        if handler is not None:
            handler(rule_config)
        else:
            self.processing_log.append(f"未知的规则类型: {rule_type}")

    def _get_source_frame(self, source_name: str) -> pd.DataFrame:
        """Return a copy of ``source_name``'s frame.

        Looks first in the raw ``data_sources``, then in ``processed_data``
        (results of earlier rules).  Raises ``ValueError`` if unknown.
        """
        if source_name in self.data_sources:
            return self.data_sources[source_name]['data'].copy()
        if source_name in self.processed_data:
            return self.processed_data[source_name].copy()
        raise ValueError(f"数据源不存在: {source_name}")

    def _execute_data_extraction(self, config: Dict[str, Any]):
        """Extract a subset of a raw data source (filter / columns / slice).

        Result is stored under ``config['result_name']`` (default
        ``<source>_extracted``).
        """
        source_name = config.get('source_name')
        extraction_rules = config.get('extraction_rules', [])

        # Extraction deliberately reads raw sources only, matching the
        # original behavior (it does not consult processed_data).
        if source_name not in self.data_sources:
            raise ValueError(f"数据源不存在: {source_name}")

        source_data = self.data_sources[source_name]['data']
        extracted_data = source_data.copy()

        for rule in extraction_rules:
            rule_type = rule.get('type')

            if rule_type == 'filter':
                # Row filter via a pandas query expression.
                condition = rule.get('condition')
                extracted_data = extracted_data.query(condition)

            elif rule_type == 'select_columns':
                # Column projection.
                columns = rule.get('columns', [])
                extracted_data = extracted_data[columns]

            elif rule_type == 'slice_rows':
                # Positional row slice [start:end).
                start = rule.get('start', 0)
                end = rule.get('end')
                extracted_data = extracted_data.iloc[start:end]

        result_name = config.get('result_name', f"{source_name}_extracted")
        self.processed_data[result_name] = extracted_data

    def _execute_field_mapping(self, config: Dict[str, Any]):
        """Build a new frame by mapping source fields onto target fields.

        Each entry of ``mapping_rules`` maps target field -> either a source
        column name (plain copy) or a dict rule: ``fixed_value``,
        ``formula`` (pandas ``eval``), ``lookup`` (value map) or
        ``concatenate`` (join several fields with a separator).
        """
        source_name = config.get('source_name')
        mapping_rules = config.get('mapping_rules', {})

        if source_name not in self.data_sources and source_name not in self.processed_data:
            raise ValueError(f"数据源不存在: {source_name}")

        source_data = self._get_source_frame(source_name)

        mapped_data = pd.DataFrame()

        for target_field, mapping_rule in mapping_rules.items():
            if isinstance(mapping_rule, str):
                # Plain column-to-column mapping; unknown columns are skipped.
                if mapping_rule in source_data.columns:
                    mapped_data[target_field] = source_data[mapping_rule]

            elif isinstance(mapping_rule, dict):
                rule_type = mapping_rule.get('type')

                if rule_type == 'fixed_value':
                    # Constant value repeated for every row.
                    value = mapping_rule.get('value')
                    mapped_data[target_field] = [value] * len(source_data)

                elif rule_type == 'formula':
                    # Formula evaluated via DataFrame.eval (pandas expression).
                    formula = mapping_rule.get('formula')
                    mapped_data[target_field] = source_data.eval(formula)

                elif rule_type == 'lookup':
                    # Dictionary lookup; unmapped values become NaN (Series.map).
                    lookup_table = mapping_rule.get('lookup_table', {})
                    source_field = mapping_rule.get('source_field')
                    mapped_data[target_field] = source_data[source_field].map(lookup_table)

                elif rule_type == 'concatenate':
                    # Join several source fields into one string per row.
                    fields = mapping_rule.get('fields', [])
                    separator = mapping_rule.get('separator', '')
                    values = []
                    for _, row in source_data.iterrows():
                        concat_value = separator.join([str(row[f]) for f in fields if f in source_data.columns])
                        values.append(concat_value)
                    mapped_data[target_field] = values

        result_name = config.get('result_name', f"{source_name}_mapped")
        self.processed_data[result_name] = mapped_data

    def _execute_data_transformation(self, config: Dict[str, Any]):
        """Apply in-place transformations: split, aggregate, row generation,
        and container-type expansion.

        Result is stored under ``config['result_name']`` (default
        ``<source>_transformed``).
        """
        source_name = config.get('source_name')
        transformation_rules = config.get('transformation_rules', [])

        # NOTE: unlike field mapping, an unknown source raises KeyError here
        # (preserved behavior).
        if source_name in self.data_sources:
            source_data = self.data_sources[source_name]['data'].copy()
        else:
            source_data = self.processed_data[source_name].copy()

        transformed_data = source_data.copy()

        for rule in transformation_rules:
            rule_type = rule.get('type')
            target_field = rule.get('target_field')

            if rule_type == 'split':
                transformed_data = self._transform_split(transformed_data, rule, target_field)
            elif rule_type == 'aggregate':
                transformed_data = self._transform_aggregate(transformed_data, rule, target_field)
            elif rule_type == 'generate_rows':
                transformed_data = self._transform_generate_rows(transformed_data, rule)
            elif rule_type == 'container_expansion':
                transformed_data = self._transform_container_expansion(transformed_data, rule)

        result_name = config.get('result_name', f"{source_name}_transformed")
        self.processed_data[result_name] = transformed_data

    def _transform_split(self, df: pd.DataFrame, rule: Dict[str, Any], target_field: str) -> pd.DataFrame:
        """Split a string field on a delimiter (expand / first / last / nth)."""
        source_field = rule.get('source_field')
        delimiter = rule.get('delimiter', ',')
        split_type = rule.get('split_type', 'expand')  # expand, first, last, nth

        if split_type == 'expand':
            # Expand into numbered columns <target>_1, <target>_2, ...
            split_data = df[source_field].str.split(delimiter, expand=True)
            for i, col in enumerate(split_data.columns):
                df[f"{target_field}_{i+1}"] = split_data[col]
        else:
            # Keep a single part of the split.
            split_series = df[source_field].str.split(delimiter)
            if split_type == 'first':
                df[target_field] = split_series.str[0]
            elif split_type == 'last':
                df[target_field] = split_series.str[-1]
            elif split_type == 'nth':
                nth = rule.get('nth', 0)
                df[target_field] = split_series.str[nth]
        return df

    def _transform_aggregate(self, df: pd.DataFrame, rule: Dict[str, Any], target_field: str) -> pd.DataFrame:
        """Combine several fields into one (string concat or numeric agg)."""
        source_fields = rule.get('source_fields', [])
        agg_method = rule.get('method', 'concat')
        separator = rule.get('separator', ',')

        if agg_method == 'concat':
            # Row-wise string join, skipping NaN values.
            values = []
            for _, row in df.iterrows():
                concat_value = separator.join([str(row[f]) for f in source_fields if pd.notna(row[f])])
                values.append(concat_value)
            df[target_field] = values

        elif agg_method in ['sum', 'mean', 'max', 'min']:
            # Row-wise numeric aggregation over the numeric source columns.
            numeric_data = df[source_fields].select_dtypes(include=['number'])
            if agg_method == 'sum':
                df[target_field] = numeric_data.sum(axis=1)
            elif agg_method == 'mean':
                df[target_field] = numeric_data.mean(axis=1)
            elif agg_method == 'max':
                df[target_field] = numeric_data.max(axis=1)
            elif agg_method == 'min':
                df[target_field] = numeric_data.min(axis=1)
        return df

    def _transform_generate_rows(self, df: pd.DataFrame, rule: Dict[str, Any]) -> pd.DataFrame:
        """One-to-many row generation driven by per-container count fields.

        For each row, every container count field with a positive value
        produces that many single-unit rows; rows that generate nothing are
        kept as-is.
        """
        container_fields = rule.get('container_fields', [])
        target_container_field = rule.get('target_container_field', 'container_type')
        target_quantity_field = rule.get('target_quantity_field', 'quantity')

        new_rows = []
        for _, row in df.iterrows():
            row_generated = False

            for container_field in container_fields:
                if container_field in row and pd.notna(row[container_field]):
                    value = str(row[container_field]).strip()
                    if value and value != '0':
                        try:
                            quantity = int(float(value))
                            # Derive the container type from the field name,
                            # e.g. '40HQ_count' -> '40HQ'.
                            container_type = container_field.replace('_count', '').replace('_qty', '')

                            for _ in range(quantity):
                                new_row = row.copy()
                                new_row[target_container_field] = container_type
                                new_row[target_quantity_field] = 1
                                new_rows.append(new_row)

                            row_generated = True
                        except (ValueError, TypeError):
                            # Non-numeric count: try the next container field.
                            continue

            # Preserve the original row when nothing was generated from it.
            if not row_generated:
                new_rows.append(row)

        return pd.DataFrame(new_rows).reset_index(drop=True)

    def _transform_container_expansion(self, df: pd.DataFrame, rule: Dict[str, Any]) -> pd.DataFrame:
        """Generic container-type expansion.

        Three modes:
          * ``separate_rows``    — one output row per container unit;
          * ``aggregate_rows``   — summarize all containers of a row into
            one '<type>x<qty>, ...' cell plus a total quantity;
          * ``separate_columns`` — ensure every container quantity column exists.
        """
        container_config = rule.get('container_config', {})
        expansion_mode = rule.get('expansion_mode', 'separate_rows')
        skip_zero_quantity = rule.get('skip_zero_quantity', True)
        target_container_field = rule.get('target_container_field', 'container_type')
        target_quantity_field = rule.get('target_quantity_field', 'quantity')

        # Normalize the container field list, accepting the legacy config
        # shape (supported_containers + quantity_suffix) as a fallback.
        container_fields = container_config.get('fields', [])
        if not container_fields:
            supported_containers = rule.get('supported_containers', [])
            quantity_suffix = rule.get('quantity_suffix', '')

            if not supported_containers:
                # Built-in default container types.
                supported_containers = [
                    {'type': '20GP', 'description': '20英尺普通柜'},
                    {'type': '40GP', 'description': '40英尺普通柜'},
                    {'type': '40HQ', 'description': '40英尺高柜'},
                    {'type': '45HQ', 'description': '45英尺高柜'}
                ]

            container_fields = [
                {
                    'container_type': container['type'],
                    'quantity_field': f"{container['type']}{quantity_suffix}",
                    'description': container.get('description', '')
                }
                for container in supported_containers
            ]

        if expansion_mode == 'separate_rows':
            # Row-per-unit expansion.
            new_rows = []
            for _, row in df.iterrows():
                row_has_containers = False

                for container_field in container_fields:
                    container_type = container_field['container_type']
                    qty_field = container_field['quantity_field']

                    if qty_field in row and pd.notna(row[qty_field]):
                        try:
                            quantity = int(float(row[qty_field]))
                            if quantity > 0:
                                for i in range(quantity):
                                    new_row = row.copy()
                                    new_row[target_container_field] = container_type
                                    new_row[target_quantity_field] = 1  # one unit per row

                                    # Optionally zero out all source count
                                    # fields on the generated rows.
                                    if container_config.get('clear_source_fields', True):
                                        for cf in container_fields:
                                            if cf['quantity_field'] in new_row:
                                                new_row[cf['quantity_field']] = 0

                                    # Per-container custom field overrides.
                                    custom_fields = container_field.get('custom_fields', {})
                                    for field_name, field_value in custom_fields.items():
                                        new_row[field_name] = field_value

                                    new_rows.append(new_row)
                                row_has_containers = True
                        except (ValueError, TypeError) as e:
                            self.processing_log.append(f"容器数量转换错误: {qty_field}={row[qty_field]}, 错误: {str(e)}")
                            continue

                # Keep the container-less row only when configured to.
                if not row_has_containers and not skip_zero_quantity:
                    new_rows.append(row)

            if new_rows:
                # BUGFIX: capture the original row count BEFORE reassigning
                # df; the old code logged the new count as the original.
                original_row_count = len(df)
                df = pd.DataFrame(new_rows).reset_index(drop=True)
                self.processing_log.append(f"集装箱扩展完成: 原{original_row_count}行 -> 新{len(new_rows)}行")

        elif expansion_mode == 'aggregate_rows':
            # Summarize all container types of each row into one cell.
            # BUGFIX: iterrows() yields copies, so mutating `row` (as the
            # old code did) discarded the results — write back via .loc,
            # which also creates the target columns when missing.
            for idx, row in df.iterrows():
                total_containers = []
                total_quantity = 0

                for container_field in container_fields:
                    container_type = container_field['container_type']
                    qty_field = container_field['quantity_field']

                    if qty_field in row and pd.notna(row[qty_field]):
                        try:
                            quantity = int(float(row[qty_field]))
                        except (ValueError, TypeError):
                            continue
                        if quantity > 0:
                            total_containers.append(f"{container_type}x{quantity}")
                            total_quantity += quantity

                if total_containers:
                    df.loc[idx, target_container_field] = ', '.join(total_containers)
                    df.loc[idx, target_quantity_field] = total_quantity

        elif expansion_mode == 'separate_columns':
            # Ensure every configured container quantity column exists.
            for container_field in container_fields:
                qty_field = container_field['quantity_field']
                if qty_field not in df.columns:
                    df[qty_field] = 0

        return df

    def _execute_data_validation(self, config: Dict[str, Any]):
        """Validate a frame against the configured rules.

        Errors are accumulated into ``processing_log``; when
        ``config['strict_mode']`` is true, any error raises ``ValueError``.
        The (unmodified) frame is stored under ``<source>_validated``.
        """
        source_name = config.get('source_name')
        validation_rules = config.get('validation_rules', [])

        if source_name in self.data_sources:
            source_data = self.data_sources[source_name]['data'].copy()
        else:
            source_data = self.processed_data[source_name].copy()

        validation_errors = []

        for rule in validation_rules:
            rule_type = rule.get('type')

            if rule_type == 'carrier_master_waybill':
                # Carrier / master-waybill validation (TO business specific).
                carrier_field = rule.get('carrier_field', '承运人')
                master_waybill_field = rule.get('master_waybill_field', '主单号')
                supported_carriers = rule.get('supported_carriers', [])
                validation_type = rule.get('validation_type', 'none')
                validation_config = rule.get('validation_config', '')
                strict_mode = rule.get('strict_mode', False)
                error_message = rule.get('error_message', '承运人或主单号格式不正确')

                # Carrier code -> carrier name, upper-cased for lookup.
                carrier_map = {carrier.get('code', '').upper(): carrier.get('name', '') for carrier in supported_carriers}

                for idx, row in source_data.iterrows():
                    carrier = str(row.get(carrier_field, '')).strip()
                    master_waybill = str(row.get(master_waybill_field, '')).strip()

                    if carrier and master_waybill:
                        carrier_upper = carrier.upper()

                        # In strict mode the carrier must be whitelisted.
                        if strict_mode and carrier_upper not in carrier_map:
                            validation_errors.append(f"行{idx+1}: 不支持的承运人: {carrier}")
                            continue

                        # Waybill format check, by configured validation type.
                        if validation_type == 'length' and validation_config:
                            try:
                                expected_length = int(validation_config)
                                if len(master_waybill) != expected_length:
                                    validation_errors.append(f"行{idx+1}: {error_message}，主单号长度应为{expected_length}位")
                            except ValueError:
                                # Non-integer length config: skip the check.
                                pass

                        elif validation_type == 'regex' and validation_config:
                            try:
                                if not re.match(validation_config, master_waybill):
                                    validation_errors.append(f"行{idx+1}: {error_message}")
                            except re.error:
                                validation_errors.append(f"行{idx+1}: 正则表达式配置错误")

                        elif validation_type == 'custom':
                            # Extension point for bespoke validation logic.
                            pass

                        # Legacy built-in check for CZ/MU/CA carriers
                        # (kept for backward compatibility).
                        elif validation_type == 'none' and carrier_upper in ['CZ', 'MU', 'CA']:
                            if not re.match(r'^\d{3}\d{8}$', master_waybill):
                                validation_errors.append(f"行{idx+1}: {carrier}承运人主单号格式错误，应为3位+8位数字")

            elif rule_type == 'required_fields':
                # Required-field (non-empty) validation.
                required_fields = rule.get('fields', [])
                for field in required_fields:
                    if field in source_data.columns:
                        empty_rows = source_data[source_data[field].isna() | (source_data[field] == '')].index
                        for idx in empty_rows:
                            validation_errors.append(f"行{idx+1}: 字段'{field}'不能为空")

            elif rule_type == 'data_format':
                # Regex format validation for one field.
                field = rule.get('field')
                pattern = rule.get('pattern')
                error_message = rule.get('error_message', f'字段{field}格式错误')

                if field in source_data.columns and pattern:
                    for idx, row in source_data.iterrows():
                        value = str(row.get(field, '')).strip()
                        if value and not re.match(pattern, value):
                            validation_errors.append(f"行{idx+1}: {error_message}")

        # Record the outcome; strict mode escalates errors to an exception.
        if validation_errors:
            self.processing_log.extend([f"数据验证错误: {error}" for error in validation_errors])
            if config.get('strict_mode', False):
                raise ValueError(f"数据验证失败: {'; '.join(validation_errors)}")
        else:
            self.processing_log.append(f"数据验证通过: {source_name}")

        result_name = config.get('result_name', f"{source_name}_validated")
        self.processed_data[result_name] = source_data

    def _execute_data_aggregation(self, config: Dict[str, Any]):
        """Aggregate a frame, grouped (pandas groupby/agg) or globally.

        Result is stored under ``<source>_aggregated``.
        """
        source_name = config.get('source_name')
        group_by = config.get('group_by', [])
        agg_rules = config.get('aggregation_rules', {})

        if source_name in self.data_sources:
            source_data = self.data_sources[source_name]['data'].copy()
        else:
            source_data = self.processed_data[source_name].copy()

        if group_by:
            # Grouped aggregation via pandas.
            grouped_data = source_data.groupby(group_by).agg(agg_rules).reset_index()
        else:
            # Global (whole-frame) aggregation, one row of results.
            agg_result = {}
            for field, method in agg_rules.items():
                if method == 'count':
                    agg_result[field] = [len(source_data)]
                elif method == 'sum':
                    agg_result[field] = [source_data[field].sum()]
                elif method == 'mean':
                    agg_result[field] = [source_data[field].mean()]
                # Further aggregation methods can be added here.

            grouped_data = pd.DataFrame(agg_result)

        result_name = config.get('result_name', f"{source_name}_aggregated")
        self.processed_data[result_name] = grouped_data

    def _execute_file_association(self, config: Dict[str, Any]):
        """Join two sources on one or more keys (pandas merge).

        Result is stored under ``<left>_<right>_joined``.
        """
        left_source = config.get('left_source')
        right_source = config.get('right_source')
        join_keys = config.get('join_keys', [])
        join_type = config.get('join_type', 'inner')  # inner, left, right, outer

        if left_source in self.data_sources:
            left_data = self.data_sources[left_source]['data']
        else:
            left_data = self.processed_data[left_source]

        if right_source in self.data_sources:
            right_data = self.data_sources[right_source]['data']
        else:
            right_data = self.processed_data[right_source]

        # pd.merge accepts either a scalar key or a list of keys.
        key_spec = join_keys[0] if len(join_keys) == 1 else join_keys
        joined_data = pd.merge(left_data, right_data, on=key_spec, how=join_type)

        result_name = config.get('result_name', f"{left_source}_{right_source}_joined")
        self.processed_data[result_name] = joined_data

    def _execute_output_generation(self, config: Dict[str, Any]):
        """No-op: output generation happens in ``_generate_outputs``."""
        pass

    def _generate_outputs(self) -> Dict[str, Any]:
        """Render every configured output; failures are captured per output."""
        output_configs = self.template.output_configs.all()
        results = {}

        for output_config in output_configs:
            try:
                output_data = self._generate_single_output(output_config)
                results[output_config.output_name] = output_data
            except Exception as e:
                # One broken output must not prevent the others.
                results[output_config.output_name] = {
                    'success': False,
                    'error': str(e)
                }

        return results

    def _generate_single_output(self, output_config: OutputConfig) -> Dict[str, Any]:
        """Render one output config from a raw or processed source.

        Supports ``single_row`` (first row only), ``multiple_files``
        (one file per group value) and the default single-file output.
        """
        config = output_config.output_config
        source_name = config.get('source_name')

        if source_name in self.data_sources:
            output_data = self.data_sources[source_name]['data']
        elif source_name in self.processed_data:
            output_data = self.processed_data[source_name]
        else:
            raise ValueError(f"输出数据源不存在: {source_name}")

        if output_config.output_type == 'single_row':
            # Single-row output: keep only the first row (as a frame).
            if len(output_data) > 0:
                output_data = output_data.iloc[[0]]

        elif output_config.output_type == 'multiple_files':
            # Multi-file output: one file per distinct group_field value.
            group_field = config.get('group_field')
            if group_field and group_field in output_data.columns:
                grouped = output_data.groupby(group_field)
                file_results = {}
                for name, group in grouped:
                    file_results[f"{name}.xlsx"] = self._dataframe_to_output(group, output_config)
                return {
                    'success': True,
                    'type': 'multiple_files',
                    'files': file_results
                }

        # Default: single-file output.
        return {
            'success': True,
            'type': 'single_file',
            'data': self._dataframe_to_output(output_data, output_config)
        }

    def _dataframe_to_output(self, df: pd.DataFrame, output_config: OutputConfig) -> Dict[str, Any]:
        """Serialize a DataFrame into the configured output format.

        ``excel``/``json``/``dict`` use record dicts; ``csv`` embeds the
        CSV text directly.
        """
        if output_config.output_format == 'excel':
            # Dict representation to be materialized as Excel downstream.
            return {
                'format': 'excel',
                'data': df.to_dict('records'),
                'columns': df.columns.tolist(),
                'shape': df.shape
            }

        elif output_config.output_format == 'csv':
            return {
                'format': 'csv',
                'data': df.to_csv(index=False),
                'shape': df.shape
            }

        elif output_config.output_format == 'json':
            return {
                'format': 'json',
                'data': df.to_dict('records'),
                'shape': df.shape
            }

        else:
            return {
                'format': 'dict',
                'data': df.to_dict('records'),
                'columns': df.columns.tolist(),
                'shape': df.shape
            }

    def _get_processing_summary(self) -> Dict[str, Any]:
        """Return a summary of the run (counts plus the full log)."""
        return {
            'template_name': self.template.name,
            'template_type': self.template.template_type,
            'data_source_type': self.template.data_source_type,
            'data_sources_count': len(self.data_sources),
            'processed_data_count': len(self.processed_data),
            'total_processing_steps': len(self.processing_log),
            'processing_log': self.processing_log
        }