import pandas as pd
import openpyxl
import re
from typing import Dict, List, Any, Tuple
from django.core.files.uploadedfile import UploadedFile
import json


class ExcelProcessor:
    """Excel workbook parser and rule-based tabular data processor.

    Two largely independent responsibilities:

    * Workbook inspection via openpyxl (``load_excel`` /
      ``extract_structure`` / ``get_cell_value``): sheet metadata, headers,
      cell values, merged ranges and embedded images.
    * Rule-based processing via pandas (``apply_field_rules`` and the
      ``_apply_*_rule`` helpers): split, aggregate, deduplicate, generate,
      assign and transform rules, plus partitioning one DataFrame into
      several named output files.
    """

    def __init__(self, file_path_or_file):
        # Either a filesystem path or a file-like object (e.g. a Django
        # UploadedFile); openpyxl accepts both transparently.
        self.file_path_or_file = file_path_or_file
        self.workbook = None   # openpyxl Workbook once load_excel() succeeds
        self.worksheet = None  # currently selected openpyxl Worksheet
        self.structure = {}    # cached result of the last extract_structure()

    def load_excel(self, sheet_name=None):
        """Open the workbook and select a worksheet.

        Args:
            sheet_name: Optional sheet title; defaults to the active sheet.

        Returns:
            bool: True on success, False when loading failed.
        """
        try:
            # openpyxl.load_workbook handles paths and file-like objects
            # alike, so no UploadedFile special-casing is needed (the old
            # isinstance() branch executed the exact same call anyway).
            self.workbook = openpyxl.load_workbook(self.file_path_or_file)

            if sheet_name:
                self.worksheet = self.workbook[sheet_name]
            else:
                self.worksheet = self.workbook.active

            return True
        except Exception as e:
            # Best-effort loader: callers only inspect the boolean result.
            print(f"加载Excel文件失败: {e}")
            return False

    def extract_structure(self) -> Dict[str, Any]:
        """Extract structural metadata from the selected worksheet.

        Returns:
            A dict with sheet dimensions, first-row headers, a preview of
            the first 10 rows, per-cell value/type records, merged-cell
            ranges and embedded-image anchors.  Empty dict when no
            worksheet is loaded.  The result is also cached on
            ``self.structure``.
        """
        if not self.worksheet:
            return {}

        structure = {
            'sheet_name': self.worksheet.title,
            'max_row': self.worksheet.max_row,
            'max_column': self.worksheet.max_column,
            'headers': {},
            'data_preview': [],
            'cell_data': {},
            'merged_cells': [],
            'images': []
        }

        # Merged-cell ranges.  Also build a coordinate -> info lookup once,
        # so the per-cell loops below do not rescan every range for every
        # cell (the old code was O(cells * ranges)).
        merged_cells = []
        merged_lookup = {}
        for merged_range in self.worksheet.merged_cells.ranges:
            top_left = (openpyxl.utils.get_column_letter(merged_range.min_col)
                        + str(merged_range.min_row))
            merged_cells.append({
                'range': str(merged_range),
                'min_row': merged_range.min_row,
                'max_row': merged_range.max_row,
                'min_col': merged_range.min_col,
                'max_col': merged_range.max_col,
                'top_left': top_left
            })
            for r in range(merged_range.min_row, merged_range.max_row + 1):
                for c in range(merged_range.min_col, merged_range.max_col + 1):
                    coord = openpyxl.utils.get_column_letter(c) + str(r)
                    merged_lookup[coord] = {
                        'range': str(merged_range),
                        'is_top_left': coord == top_left
                    }
        structure['merged_cells'] = merged_cells

        # Embedded images (openpyxl keeps them on the private _images list,
        # hence the defensive hasattr checks).
        images = []
        if hasattr(self.worksheet, '_images'):
            for img in self.worksheet._images:
                if hasattr(img, 'anchor') and hasattr(img.anchor, '_from'):
                    anchor = img.anchor._from
                    images.append({
                        'row': anchor.row + 1,  # anchors are 0-based; convert to 1-based
                        'col': anchor.col + 1,
                        'cell_address': openpyxl.utils.get_column_letter(anchor.col + 1) + str(anchor.row + 1),
                        'width': getattr(img, 'width', 0),
                        'height': getattr(img, 'height', 0)
                    })
        structure['images'] = images

        # Headers: the first row is assumed to contain the column titles.
        headers = {}
        for col in range(1, self.worksheet.max_column + 1):
            cell = self.worksheet.cell(row=1, column=col)
            col_letter = openpyxl.utils.get_column_letter(col)
            headers[col_letter] = {
                'value': cell.value,
                'column_index': col,
                'cell_address': f'{col_letter}1'
            }
        structure['headers'] = headers

        # First 10 rows as a preview; every visited cell is also recorded
        # in structure['cell_data'] keyed by its A1-style address.
        preview_rows = min(10, self.worksheet.max_row)
        for row in range(1, preview_rows + 1):
            row_data = {}
            for col in range(1, self.worksheet.max_column + 1):
                cell = self.worksheet.cell(row=row, column=col)
                col_letter = openpyxl.utils.get_column_letter(col)
                cell_address = f'{col_letter}{row}'

                # O(1) merged-cell check via the precomputed lookup.
                merged_info = merged_lookup.get(cell_address)
                is_merged = merged_info is not None

                row_data[col_letter] = {
                    'value': cell.value,
                    'cell_address': cell_address,
                    'data_type': str(type(cell.value).__name__),
                    'is_merged': is_merged,
                    'merged_info': merged_info
                }

                structure['cell_data'][cell_address] = {
                    'value': cell.value,
                    'row': row,
                    'column': col,
                    'data_type': str(type(cell.value).__name__),
                    'is_merged': is_merged,
                    'merged_info': merged_info
                }

            structure['data_preview'].append(row_data)

        self.structure = structure
        return structure

    def get_cell_value(self, cell_address: str):
        """Return the value at an A1-style address, or None on any failure."""
        if not self.worksheet:
            return None

        try:
            return self.worksheet[cell_address].value
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return None

    def apply_field_rules(self, field_configs: Dict[str, Any], source_data: pd.DataFrame) -> pd.DataFrame:
        """Apply every active field rule to a copy of *source_data*.

        Args:
            field_configs: Mapping of field name -> rule config.  A config
                is skipped when ``is_active`` is falsy; its ``rule_type``
                selects the handler.
            source_data: Input DataFrame (never mutated; rules operate on
                a copy).

        Returns:
            A new DataFrame with all rules applied in iteration order.
        """
        # Dispatch table instead of an if/elif chain; unknown rule types
        # are silently ignored, exactly as before.
        handlers = {
            'split': self._apply_split_rule,
            'aggregate': self._apply_aggregate_rule,
            'deduplicate': self._apply_deduplicate_rule,
            'generate': self._apply_generate_rule,
            'assign': self._apply_assign_rule,
            'transform': self._apply_transform_rule,
        }

        result_data = source_data.copy()
        for field_name, config in field_configs.items():
            if not config.get('is_active', True):
                continue
            handler = handlers.get(config.get('rule_type'))
            if handler:
                result_data = handler(result_data, field_name, config)

        return result_data

    def _apply_split_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Split each value of *field_name* and keep a single part.

        Config keys: ``split_delimiter`` (default ','), ``split_type``
        ('first' | 'last' | 'nth' | 'regex'), ``split_index`` for 'nth',
        ``split_regex`` for 'regex' (first capture group is kept).
        NaN/None values pass through unchanged.
        """
        if field_name not in data.columns:
            return data

        delimiter = config.get('split_delimiter', ',')
        split_type = config.get('split_type', 'first')
        split_index = config.get('split_index', 0)
        split_regex = config.get('split_regex', '')

        def split_value(value):
            if pd.isna(value) or value is None:
                return value

            value_str = str(value)

            if split_type == 'regex' and split_regex:
                match = re.search(split_regex, value_str)
                # Replace only when the pattern matched AND captured.
                return match.group(1) if match and match.groups() else value

            parts = value_str.split(delimiter)

            if split_type == 'first':
                return parts[0] if parts else value
            if split_type == 'last':
                return parts[-1] if parts else value
            if split_type == 'nth':
                try:
                    # The length guard covers non-negative indices; the
                    # except covers out-of-range NEGATIVE split_index.
                    return parts[split_index] if len(parts) > split_index else value
                except IndexError:
                    return value

            return value

        data[field_name] = data[field_name].apply(split_value)
        return data

    def _apply_aggregate_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Merge groups of ``aggregate_count`` consecutive rows.

        For each group the first row is kept and its *field_name* value is
        replaced by the group's non-null values joined with
        ``aggregate_delimiter``.  Column-direction aggregation is not yet
        implemented and returns the input unchanged.
        """
        direction = config.get('aggregate_direction', 'row')
        count = config.get('aggregate_count', 1)
        delimiter = config.get('aggregate_delimiter', ',')

        if field_name not in data.columns:
            return data

        if direction == 'row':
            # Guard: a zero/negative group size would crash range(); fall
            # back to 1 (no grouping) instead.
            if count < 1:
                count = 1

            new_rows = []
            for start in range(0, len(data), count):
                group = data.iloc[start:start + count]
                if len(group) > 0:
                    new_row = group.iloc[0].copy()
                    values = group[field_name].dropna().astype(str).tolist()
                    new_row[field_name] = delimiter.join(values)
                    new_rows.append(new_row)

            if new_rows:
                return pd.DataFrame(new_rows).reset_index(drop=True)

        elif direction == 'column':
            # TODO: column aggregation needs a column-range spec; for now
            # it intentionally falls through and returns the input.
            pass

        return data

    def _apply_deduplicate_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Drop rows whose *field_name* value already appeared (keep first)."""
        if field_name in data.columns:
            data = data.drop_duplicates(subset=[field_name])
        return data

    def _apply_generate_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Expand "N*VALUE" cells (e.g. "4*60HQ") into N copies of the row.

        ``generate_pattern`` merely enables the rule (the N*VALUE syntax is
        fixed); each generated row gets VALUE written into
        ``generate_field_name`` when configured, otherwise back into
        *field_name*.  Rows that do not match the syntax are kept as-is.
        """
        pattern = config.get('generate_pattern', '')
        generate_field = config.get('generate_field_name', '')

        if not pattern or field_name not in data.columns:
            return data

        new_rows = []
        for _, row in data.iterrows():
            value = str(row[field_name]) if pd.notna(row[field_name]) else ''

            match = re.match(r'(\d+)\*(.+)', value)
            if match:
                count = int(match.group(1))
                new_value = match.group(2)
                target = generate_field or field_name
                for _ in range(count):
                    new_row = row.copy()
                    new_row[target] = new_value
                    new_rows.append(new_row)
            else:
                new_rows.append(row)

        if new_rows:
            return pd.DataFrame(new_rows).reset_index(drop=True)
        return data

    def _apply_assign_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Assign a constant to *field_name* according to ``assign_mode``.

        Modes:
            overwrite -- set every row (creates the column when missing).
            insert    -- set the column only when it does not exist yet.
            split     -- split ``assign_value`` on ``split_delimiter`` and
                         write part i into new column ``{field_name}_{i+1}``
                         (only when *field_name* already exists).
        """
        assign_mode = config.get('assign_mode', 'overwrite')
        assign_value = config.get('assign_value', '')

        if assign_mode == 'overwrite':
            data[field_name] = assign_value
        elif assign_mode == 'insert':
            if field_name not in data.columns:
                data[field_name] = assign_value
        elif assign_mode == 'split':
            delimiter = config.get('split_delimiter', ',')
            if field_name in data.columns:
                for i, part in enumerate(str(assign_value).split(delimiter)):
                    data[f"{field_name}_{i + 1}"] = part.strip()

        return data

    def _apply_transform_rule(self, data: pd.DataFrame, field_name: str, config: Dict) -> pd.DataFrame:
        """Apply a string transformation to *field_name*.

        ``transform_type`` selects: uppercase, lowercase, trim, replace
        (literal), regex (pattern substitution), or format (Python
        ``str.format`` with ``{value}``).  Values are coerced to str first.
        """
        transform_type = config.get('transform_type', 'none')

        if field_name not in data.columns:
            return data

        if transform_type == 'uppercase':
            data[field_name] = data[field_name].astype(str).str.upper()
        elif transform_type == 'lowercase':
            data[field_name] = data[field_name].astype(str).str.lower()
        elif transform_type == 'trim':
            data[field_name] = data[field_name].astype(str).str.strip()
        elif transform_type == 'replace':
            old_value = config.get('old_value', '')
            new_value = config.get('new_value', '')
            # regex=False makes this a literal replacement on every pandas
            # version; the old call relied on the version-dependent default
            # (regex=True before pandas 2.0), which mangled values when
            # old_value contained regex metacharacters.  The dedicated
            # 'regex' transform type below handles pattern substitution.
            data[field_name] = data[field_name].astype(str).str.replace(old_value, new_value, regex=False)
        elif transform_type == 'regex':
            pattern = config.get('regex_pattern', '')
            replacement = config.get('regex_replacement', '')
            if pattern:
                data[field_name] = data[field_name].astype(str).str.replace(pattern, replacement, regex=True)
        elif transform_type == 'format':
            format_template = config.get('format_template', '{value}')
            data[field_name] = data[field_name].apply(lambda x: format_template.format(value=x))

        return data

    def generate_multiple_files(self, data: pd.DataFrame, split_field: str = None, file_configs: Dict = None) -> Dict[str, pd.DataFrame]:
        """Partition *data* into named output DataFrames.

        Priority: split by the distinct non-null values of *split_field*
        when given; otherwise filter per *file_configs* (each entry may
        list ``filter_conditions`` with operators ==, !=, contains,
        not_contains; conditions on unknown fields are ignored); otherwise
        return ``{'default': data}``.
        """
        if split_field and split_field in data.columns:
            files = {}
            for value in data[split_field].unique():
                if pd.notna(value):
                    files[str(value)] = data[data[split_field] == value]
            return files

        if file_configs:
            files = {}
            for file_name, config in file_configs.items():
                filtered = data.copy()

                for condition in config.get('filter_conditions', []):
                    field = condition.get('field')
                    operator = condition.get('operator', '==')
                    value = condition.get('value')

                    if field not in filtered.columns:
                        continue
                    if operator == '==':
                        filtered = filtered[filtered[field] == value]
                    elif operator == '!=':
                        filtered = filtered[filtered[field] != value]
                    elif operator == 'contains':
                        filtered = filtered[filtered[field].astype(str).str.contains(str(value), na=False)]
                    elif operator == 'not_contains':
                        filtered = filtered[~filtered[field].astype(str).str.contains(str(value), na=False)]

                files[file_name] = filtered

            return files

        return {'default': data}

    def to_excel_structure_json(self) -> str:
        """Serialize the cached structure to pretty-printed JSON.

        ``default=str`` keeps serialization from raising TypeError on cell
        values json cannot encode natively (e.g. datetimes read from
        Excel cells).
        """
        return json.dumps(self.structure, ensure_ascii=False, indent=2, default=str)

    def process_template(self, source_data: pd.DataFrame, field_configs: Dict[str, Any], file_generation_config: Dict = None) -> Dict[str, pd.DataFrame]:
        """Run the full pipeline: field rules, then optional file splitting.

        Returns:
            Mapping of output name -> DataFrame; without a recognized file
            generation config the single result is keyed 'output'.
        """
        processed_data = self.apply_field_rules(field_configs, source_data)

        if file_generation_config:
            generation_type = file_generation_config.get('type', 'single')

            if generation_type == 'split_by_field':
                return self.generate_multiple_files(
                    processed_data,
                    split_field=file_generation_config.get('split_field'))
            if generation_type == 'custom_rules':
                return self.generate_multiple_files(
                    processed_data,
                    file_configs=file_generation_config.get('file_configs', {}))

        return {'output': processed_data}

    def save_files_to_excel(self, files_data: Dict[str, pd.DataFrame], output_dir: str, base_filename: str = 'output') -> List[str]:
        """Write each DataFrame to ``output_dir`` as an .xlsx file.

        A single entry is saved as ``{base_filename}.xlsx``; multiple
        entries get their dict key as a suffix.

        Returns:
            The list of saved file paths.
        """
        import os
        saved_files = []

        # Hoisted out of the loop: one makedirs call is enough, and the
        # directory is only created when there is something to save.
        if files_data:
            os.makedirs(output_dir, exist_ok=True)

        for file_key, frame in files_data.items():
            if len(files_data) == 1:
                filename = f"{base_filename}.xlsx"
            else:
                filename = f"{base_filename}_{file_key}.xlsx"

            filepath = os.path.join(output_dir, filename)
            frame.to_excel(filepath, index=False)
            saved_files.append(filepath)

        return saved_files

    def close(self):
        """Release workbook resources (safe to call when nothing is loaded)."""
        if self.workbook:
            self.workbook.close()