"""
Excel处理模块 - 完整修复版本
负责Excel文件的读取、处理和导出
"""

import os
import logging
import tempfile
import shutil
from typing import List, Dict, Any, Optional, Tuple, Union
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows

logger = logging.getLogger(__name__)


class ExcelHandler:
    """Excel文件处理器 - 完整修复版本"""

    def __init__(self):
        """Set up an empty handler with no workbook loaded."""
        # Working DataFrame and a pristine copy of what was originally read.
        self.df: Optional[pd.DataFrame] = None
        self.original_df: Optional[pd.DataFrame] = None
        # Metadata about the currently loaded workbook/worksheet.
        self.file_path: Optional[str] = None
        self.sheet_names: List[str] = []
        self.current_sheet: str = ""
        self.column_names: List[str] = []
        # Rows produced by process_data().
        self.processed_data: List[Dict] = []
        # Lazily allocated scratch directory (see _create_temp_dir).
        self._temp_dir: Optional[str] = None

    def __del__(self):
        """Best-effort scratch-directory removal when the handler is collected."""
        self._cleanup_temp_files()

    def _cleanup_temp_files(self):
        """Remove the scratch directory, if any, and forget its path.

        Fix: after a successful removal ``self._temp_dir`` is reset to
        ``None``.  Previously the stale path was kept, so a later
        ``_create_temp_dir()`` handed back a directory that no longer
        existed.
        """
        try:
            if self._temp_dir and os.path.exists(self._temp_dir):
                shutil.rmtree(self._temp_dir)
                logger.info(f"清理临时目录: {self._temp_dir}")
        except Exception as e:
            # Best-effort: log and keep the path so a retry is possible.
            logger.warning(f"清理临时文件失败: {e}")
        else:
            # Forget the path so the next _create_temp_dir() starts clean.
            self._temp_dir = None

    def _create_temp_dir(self) -> str:
        """创建临时目录"""
        if self._temp_dir is None:
            self._temp_dir = tempfile.mkdtemp(prefix="excel_handler_")
        return self._temp_dir

    def load_excel_file(self, file_path: str, sheet_name: Optional[str] = None,
                       start_row: int = 0, header_row: int = 0) -> Tuple[bool, str]:
        """Load one worksheet of an Excel file into ``self.df``.

        Args:
            file_path: Path to the workbook (validated by _validate_excel_file).
            sheet_name: Worksheet to read; defaults to the first sheet.
            start_row: Number of leading rows to skip before parsing.
                NOTE(review): pandas applies ``header`` *after* ``skiprows``,
                so ``header_row`` is relative to the remaining rows — confirm
                this matches caller expectations.
            header_row: Row index (after skipping) used as the column header.

        Returns:
            ``(success, message)``; on success ``self.df`` holds the data and
            ``self.original_df`` an untouched copy.
        """
        try:
            if not os.path.exists(file_path):
                return False, f"文件不存在: {file_path}"

            # Reject unsupported / oversized / unreadable files early.
            if not self._validate_excel_file(file_path):
                return False, "文件格式不支持或已损坏"

            # Remember the source path for later reporting.
            self.file_path = file_path

            # Enumerate worksheet names via openpyxl (read-only, values only).
            try:
                workbook = load_workbook(file_path, read_only=True, data_only=True)
                self.sheet_names = workbook.sheetnames
                workbook.close()
            except Exception as e:
                # Fall back to the conventional default sheet name.
                # NOTE(review): if the actual first sheet is not named
                # "Sheet1", the membership check below will reject it —
                # confirm this fallback is intended.
                logger.warning(f"获取工作表列表失败: {e}")
                self.sheet_names = ["Sheet1"]

            # Decide which worksheet to load (first one when unspecified).
            target_sheet = sheet_name
            if not target_sheet:
                if self.sheet_names:
                    target_sheet = self.sheet_names[0]
                else:
                    return False, "Excel文件中没有工作表"

            if target_sheet not in self.sheet_names:
                return False, f"工作表 '{target_sheet}' 不存在"

            self.current_sheet = target_sheet

            # Read the worksheet with pandas; the openpyxl engine is used
            # unconditionally, so only zip-based .xlsx/.xlsm can succeed here.
            read_kwargs = {
                'sheet_name': target_sheet,
                'engine': 'openpyxl',
                'header': header_row
            }

            if start_row > 0:
                read_kwargs['skiprows'] = range(0, start_row)

            self.df = pd.read_excel(file_path, **read_kwargs)

            # Flatten a MultiIndex header into single "level1_level2" names.
            if isinstance(self.df.columns, pd.MultiIndex):
                self.df.columns = ['_'.join(map(str, col)).strip() for col in self.df.columns]

            # Normalize: every column name becomes a stripped, unique string.
            self.df.columns = self._make_column_names_unique(
                [str(col).strip() for col in self.df.columns]
            )
            self.column_names = self.df.columns.tolist()

            # Keep a pristine copy so destructive operations can be undone.
            self.original_df = self.df.copy()

            # Log basic shape/size information for diagnostics.
            file_size = os.path.getsize(file_path) / (1024 * 1024)  # MB
            logger.info(f"成功加载Excel文件: {file_path}")
            logger.info(f"工作表: {target_sheet}, 尺寸: {len(self.df)}行 x {len(self.column_names)}列")
            logger.info(f"文件大小: {file_size:.2f}MB")

            return True, f"成功加载文件，共 {len(self.df)} 行 {len(self.column_names)} 列数据"

        except Exception as e:
            logger.error(f"加载Excel文件失败: {e}")
            return False, f"加载失败: {str(e)}"

    def _validate_excel_file(self, file_path: str) -> bool:
        """Return True if *file_path* is a workbook this handler can read.

        Checks, in order: supported extension, size limit (100MB), and that
        openpyxl can actually open the file.

        Note: legacy ``.xls`` is intentionally NOT accepted.  Every reader in
        this class goes through openpyxl (directly, or via pandas with
        ``engine='openpyxl'``), which only understands the zip-based
        .xlsx/.xlsm formats — so an ``.xls`` file could never be loaded
        anyway; previously it was listed as "valid" only to fail the
        load_workbook probe below every time.
        """
        try:
            # Only zip-based OOXML workbooks are readable by openpyxl.
            valid_extensions = {'.xlsx', '.xlsm'}
            file_ext = os.path.splitext(file_path)[1].lower()
            if file_ext not in valid_extensions:
                return False

            # Refuse very large files to keep memory use bounded.
            max_size_mb = 100
            file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
            if file_size_mb > max_size_mb:
                logger.error(f"文件过大: {file_size_mb:.2f}MB > {max_size_mb}MB")
                return False

            # Cheap structural check: can openpyxl open it at all?
            try:
                workbook = load_workbook(file_path, read_only=True)
                workbook.close()
            except Exception as e:
                logger.error(f"Excel文件格式验证失败: {e}")
                return False

            return True

        except Exception as e:
            logger.error(f"验证Excel文件失败: {e}")
            return False

    def _make_column_names_unique(self, columns: List[str]) -> List[str]:
        """Return *columns* with blanks replaced and duplicates suffixed.

        Empty/whitespace-only names become "Unnamed".  A duplicate gets a
        ``_<n>`` suffix; unlike the previous naive counter, the suffix is
        advanced until the generated name is itself unused, so an input like
        ``["A", "A_1", "A"]`` yields ``["A", "A_1", "A_2"]`` instead of
        producing a second, colliding ``"A_1"``.
        """
        used = set()
        counters: Dict[str, int] = {}
        unique_columns = []

        for col in columns:
            # Blank headers get a stable placeholder name.
            if not col or col.strip() == "":
                col = "Unnamed"

            if col in used:
                # Bump the per-name counter until the candidate is free.
                n = counters.get(col, 0)
                candidate = col
                while candidate in used:
                    n += 1
                    candidate = f"{col}_{n}"
                counters[col] = n
                used.add(candidate)
                unique_columns.append(candidate)
            else:
                used.add(col)
                unique_columns.append(col)

        return unique_columns

    def process_data(self, configs: List[Dict], start_row: int = 0,
                    filters: Optional[Dict[str, Any]] = None) -> Tuple[bool, str]:
        """处理Excel数据 - 完整修复版本"""
        try:
            if self.df is None:
                return False, "请先加载Excel文件"

            # 验证配置
            if not configs:
                return False, "请配置输入项"

            # 验证配置中的列是否存在
            missing_columns = []
            for config in configs:
                column_name = str(config.get('column_name', ''))
                if column_name and column_name not in self.column_names:
                    missing_columns.append(column_name)

            if missing_columns:
                return False, f"以下列不存在: {', '.join(missing_columns)}"

            # 处理开始行
            if start_row < 0:
                start_row = 0
            elif start_row >= len(self.df):
                return False, f"开始行 {start_row} 超出数据范围"

            # 应用过滤器
            df_filtered = self.df.iloc[start_row:].copy()

            if filters:
                df_filtered = self._apply_filters(df_filtered, filters)

            if len(df_filtered) == 0:
                return False, "过滤后没有数据"

            # 处理数据
            self.processed_data = []
            processed_count = 0
            error_count = 0

            for index, row in df_filtered.iterrows():
                try:
                    row_data = {
                        '__row_number': index + 2,  # Excel行号（从1开始，加上表头）
                        '__original_index': index
                    }

                    # 为每个配置项添加数据
                    for config in configs:
                        column_name = config['column_name']
                        if column_name in df_filtered.columns:
                            value = row[column_name]

                            # 处理特殊值
                            value = self._clean_value(value, config)

                            # 应用转换函数
                            value = self._apply_transformations(value, config)

                            row_data[column_name] = value

                    self.processed_data.append(row_data)
                    processed_count += 1

                except Exception as e:
                    logger.warning(f"处理行 {index} 失败: {e}")
                    error_count += 1
                    continue

            logger.info(f"处理完成: {processed_count} 行成功, {error_count} 行失败")

            if error_count > 0:
                return True, f"处理完成: {processed_count} 行成功, {error_count} 行失败"
            else:
                return True, f"处理完成，共 {processed_count} 行数据"

        except Exception as e:
            logger.error(f"处理数据失败: {e}")
            return False, f"处理失败: {str(e)}"

    def _clean_value(self, value: Any, config: Dict[str, Any]) -> Any:
        """Normalize a single cell value according to *config*.

        - NaN/NaT/None -> ``config['default_value']`` (or "")
        - +/-inf       -> ``config['default_value']`` (or 0)
        - strings are stripped; inner runs of whitespace are collapsed
          unless ``config['remove_whitespace']`` is False
        - ``config['data_type']`` in {'int','float','str','bool'} coerces
          the value; coercion failures fall back to the default value

        Non-string iterables (lists, arrays, ...) are passed through
        untouched.
        """
        try:
            # Fix: pass containers through BEFORE pd.isna() — on an
            # array-like, pd.isna returns an array whose truth value is
            # ambiguous, which used to raise and silently return the default.
            if hasattr(value, '__iter__') and not isinstance(value, str):
                return value

            # Missing values (NaN/NaT/None).
            if pd.isna(value):
                return config.get('default_value', "")

            # Infinite values.  Fix: also cover plain Python floats — the
            # old dtype-only check let float('inf') slip through.
            if (isinstance(value, float) or hasattr(value, 'dtype')) and np.isinf(value):
                return config.get('default_value', 0)

            # String tidy-up.
            if isinstance(value, str):
                value = value.strip()
                if config.get('remove_whitespace', True):
                    value = ' '.join(value.split())

            # Optional type coercion.
            target_type = config.get('data_type')
            if target_type:
                try:
                    if target_type == 'int':
                        value = int(float(value)) if value not in ['', None] else 0
                    elif target_type == 'float':
                        value = float(value) if value not in ['', None] else 0.0
                    elif target_type == 'str':
                        value = str(value) if value not in [None] else ""
                    elif target_type == 'bool':
                        if isinstance(value, str):
                            value = value.lower() in ('true', '1', 'yes', 'y', '是')
                        else:
                            value = bool(value)
                except (ValueError, TypeError):
                    value = config.get('default_value', "")

            return value

        except Exception as e:
            logger.warning(f"清理值失败: {e}")
            return config.get('default_value', "")

    def _apply_transformations(self, value: Any, config: Dict[str, Any]) -> Any:
        """Apply the ordered list ``config['transformations']`` to *value*.

        Supported transform dicts:
          {'type': 'replace', 'old_value': .., 'new_value': ..}  (str only)
          {'type': 'regex', 'pattern': .., 'replacement': ..}    (str only)
          {'type': 'math', 'operation': add|subtract|multiply|divide,
           'operand': number} — silently skipped for non-numeric values;
           division by zero leaves the value unchanged.

        Any unexpected error is logged and the current value returned as-is.
        """
        # Fix: hoisted out of the per-transform loop — the old code
        # re-executed `import re` on every regex transform.
        import re

        try:
            transformations = config.get('transformations', [])

            for transform in transformations:
                transform_type = transform.get('type')

                if transform_type == 'replace':
                    # Plain substring replacement (strings only).
                    if isinstance(value, str):
                        old_val = transform.get('old_value', '')
                        new_val = transform.get('new_value', '')
                        value = value.replace(old_val, new_val)

                elif transform_type == 'regex':
                    # Regular-expression substitution (strings only).
                    if isinstance(value, str):
                        pattern = transform.get('pattern', '')
                        replacement = transform.get('replacement', '')
                        value = re.sub(pattern, replacement, value)

                elif transform_type == 'math':
                    # Arithmetic on numeric-convertible values.
                    try:
                        operation = transform.get('operation')
                        operand = transform.get('operand', 0)

                        if operation == 'add':
                            value = float(value) + operand
                        elif operation == 'subtract':
                            value = float(value) - operand
                        elif operation == 'multiply':
                            value = float(value) * operand
                        elif operation == 'divide':
                            if operand != 0:
                                value = float(value) / operand
                    except (ValueError, TypeError):
                        # Non-numeric value: leave it untouched.
                        pass

            return value

        except Exception as e:
            logger.warning(f"应用转换失败: {e}")
            return value

    def _apply_filters(self, df: pd.DataFrame, filters: Dict[str, Any]) -> pd.DataFrame:
        """应用数据过滤器"""
        try:
            filtered_df = df.copy()

            for column, filter_config in filters.items():
                if column not in df.columns:
                    continue

                filter_type = filter_config.get('type')
                filter_value = filter_config.get('value')

                if filter_type == 'equals':
                    filtered_df = filtered_df[filtered_df[column] == filter_value]
                elif filter_type == 'not_equals':
                    filtered_df = filtered_df[filtered_df[column] != filter_value]
                elif filter_type == 'contains':
                    filtered_df = filtered_df[filtered_df[column].astype(str).str.contains(str(filter_value), na=False)]
                elif filter_type == 'greater_than':
                    filtered_df = filtered_df[filtered_df[column] > filter_value]
                elif filter_type == 'less_than':
                    filtered_df = filtered_df[filtered_df[column] < filter_value]
                elif filter_type == 'between':
                    min_val = filter_config.get('min')
                    max_val = filter_config.get('max')
                    filtered_df = filtered_df[(filtered_df[column] >= min_val) & (filtered_df[column] <= max_val)]
                elif filter_type == 'in_list':
                    value_list = filter_config.get('values', [])
                    filtered_df = filtered_df[filtered_df[column].isin(value_list)]
                elif filter_type == 'not_null':
                    filtered_df = filtered_df[~filtered_df[column].isna()]
                elif filter_type == 'is_null':
                    filtered_df = filtered_df[filtered_df[column].isna()]

            return filtered_df

        except Exception as e:
            logger.error(f"应用过滤器失败: {e}")
            return df

    def save_processed_data(self, output_path: str, configs: List[Dict],
                           include_original: bool = False) -> Tuple[bool, str]:
        """Write ``self.processed_data`` to *output_path* as an .xlsx workbook.

        Creates the output directory if needed, backs up an existing file to
        ``<path>.backup``, writes to a temp file first and then moves it into
        place so an interrupted write never leaves a half-written workbook.

        Args:
            output_path: Destination .xlsx path.
            configs: Column configs selecting which keys to export.
            include_original: Also embed matching original rows/columns and
                an extra sheet with the full original data when True.

        Returns:
            ``(success, message)``.
        """
        # Fix: defined up-front so the error handler can safely inspect it.
        # Previously a failure before the temp file was created raised
        # UnboundLocalError inside the except block, masking the real error.
        temp_file = None
        try:
            if not self.processed_data:
                return False, "没有处理后的数据可保存"

            # Make sure the destination directory exists.
            output_dir = os.path.dirname(output_path)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir, exist_ok=True)

            # Keep a backup of any file we are about to overwrite.
            if os.path.exists(output_path):
                backup_path = f"{output_path}.backup"
                try:
                    shutil.copy2(output_path, backup_path)
                    logger.info(f"已创建备份: {backup_path}")
                except Exception as e:
                    logger.warning(f"创建备份失败: {e}")

            # Assemble export rows: row number + configured columns
            # (+ optionally the matching original row values, prefixed).
            output_data = []
            for row in self.processed_data:
                output_row = {'行号': row['__row_number']}

                for config in configs:
                    column_name = config['column_name']
                    if column_name in row:
                        output_row[column_name] = row[column_name]

                if include_original and self.original_df is not None:
                    original_index = row.get('__original_index')
                    if original_index is not None and original_index in self.original_df.index:
                        original_row = self.original_df.loc[original_index]
                        for col in self.original_df.columns:
                            output_row[f'原始_{col}'] = original_row[col]

                output_data.append(output_row)

            output_df = pd.DataFrame(output_data)

            # Write to a temp file first, then move, so a failed write
            # cannot corrupt the destination.
            temp_dir = self._create_temp_dir()
            temp_file = os.path.join(temp_dir, "temp_output.xlsx")

            with pd.ExcelWriter(temp_file, engine='openpyxl') as writer:
                output_df.to_excel(writer, sheet_name='处理数据', index=False)

                # Informational summary sheet with run metadata.
                self._add_summary_sheet(writer, configs, len(output_df))

                if include_original and self.original_df is not None:
                    self.original_df.to_excel(writer, sheet_name='原始数据', index=False)

            # Move into the final location.
            shutil.move(temp_file, output_path)

            logger.info(f"数据已保存到: {output_path}")
            return True, f"数据已保存到: {output_path}"

        except Exception as e:
            logger.error(f"保存数据失败: {e}")
            # Best-effort removal of the partially written temp file.
            try:
                if temp_file and os.path.exists(temp_file):
                    os.unlink(temp_file)
            except OSError:
                pass
            return False, f"保存失败: {str(e)}"

    def _add_summary_sheet(self, writer, configs: List[Dict], processed_count: int):
        """Append a "摘要" (summary) sheet describing this processing run.

        Writes run metadata (timestamp, source file, sheet, row/column
        counts) followed by one small section per column config.  Failures
        are logged and swallowed — the summary is informational only.
        """
        try:
            sheet = writer.book.create_sheet("摘要")

            # Header block: metadata about the run.
            rows = [
                ["处理摘要", ""],
                ["处理时间", pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")],
                ["原始文件", self.file_path],
                ["工作表", self.current_sheet],
                ["处理数据量", f"{processed_count} 行"],
                ["配置列数", f"{len(configs)} 列"],
                ["", ""],
                ["配置详情", ""],
            ]

            # One section per configured column.
            for i, config in enumerate(configs, 1):
                rows.append([f"列 {i}", config.get('column_name', '')])
                if 'data_type' in config:
                    rows.append(["数据类型", config['data_type']])
                if 'default_value' in config:
                    rows.append(["默认值", str(config['default_value'])])
                rows.append(["", ""])

            # Flush everything to the worksheet cells.
            for r, row_values in enumerate(rows, 1):
                for c, cell_value in enumerate(row_values, 1):
                    sheet.cell(row=r, column=c, value=cell_value)

        except Exception as e:
            logger.warning(f"添加摘要工作表失败: {e}")

    def get_column_names(self) -> List[str]:
        """Return a defensive copy of the loaded column names."""
        return list(self.column_names)

    def get_data_sample(self, count: int = 5) -> List[Dict]:
        """Return up to *count* rows of the processed data ([] when empty)."""
        if not self.processed_data:
            return []
        # Slicing already clamps to the available length.
        return self.processed_data[:count]

    def get_original_data_sample(self, count: int = 5) -> List[Dict]:
        """获取原始数据样本"""
        if self.df is None:
            return []

        sample_data = []
        for i, (index, row) in enumerate(self.df.head(count).iterrows()):
            row_data = {'行号': index + 2}
            row_data.update(row.to_dict())
            sample_data.append(row_data)

        return sample_data

    def get_data_stats(self) -> Dict[str, Any]:
        """获取数据统计信息"""
        if self.df is None:
            return {}

        stats = {
            'total_rows': len(self.df),
            'total_columns': len(self.column_names),
            'column_names': self.column_names,
            'processed_rows': len(self.processed_data),
            'file_path': self.file_path,
            'current_sheet': self.current_sheet,
            'available_sheets': self.sheet_names
        }

        # 列统计
        if len(self.df) > 0:
            column_stats = {}
            for col in self.column_names:
                col_data = self.df[col]
                non_null_count = col_data.count()
                null_count = len(col_data) - non_null_count

                column_stats[col] = {
                    'non_null_count': non_null_count,
                    'null_count': null_count,
                    'null_percentage': (null_count / len(col_data)) * 100 if len(col_data) > 0 else 0,
                    'dtype': str(col_data.dtype)
                }

                # 数值列统计
                if pd.api.types.is_numeric_dtype(col_data):
                    column_stats[col].update({
                        'min': float(col_data.min()) if non_null_count > 0 else None,
                        'max': float(col_data.max()) if non_null_count > 0 else None,
                        'mean': float(col_data.mean()) if non_null_count > 0 else None,
                        'std': float(col_data.std()) if non_null_count > 0 else None
                    })

            stats['column_statistics'] = column_stats

        return stats

    def validate_column(self, column_name: str) -> Tuple[bool, str]:
        """Check whether *column_name* is one of the loaded columns."""
        exists = str(column_name) in self.column_names
        if exists:
            return True, "列存在"
        return False, f"列 '{column_name}' 不存在"

    def clear_data(self) -> None:
        """Reset the handler to its pristine state and remove temp files."""
        # Restore every public attribute to its constructor default.
        for attr, blank in (
            ('df', None),
            ('original_df', None),
            ('column_names', []),
            ('processed_data', []),
            ('file_path', None),
            ('sheet_names', []),
            ('current_sheet', ""),
        ):
            setattr(self, attr, blank)
        self._cleanup_temp_files()
        logger.info("已清除Excel数据")

    def export_to_csv(self, output_path: str, configs: List[Dict]) -> Tuple[bool, str]:
        """Export ``self.processed_data`` to a CSV file.

        Only the '行号' (row number) column plus the columns named in
        *configs* are written.  The file is encoded as utf-8-sig so Excel
        detects the encoding correctly.

        Returns:
            ``(success, message)``.
        """
        try:
            if not self.processed_data:
                return False, "没有处理后的数据可导出"

            # Assemble export rows: row number + configured columns.
            output_data = []
            for row in self.processed_data:
                output_row = {'行号': row['__row_number']}
                for config in configs:
                    # Fix: .get() keeps a malformed config (no 'column_name')
                    # from aborting the whole export with a KeyError —
                    # consistent with process_data's validation loop.
                    column_name = config.get('column_name')
                    if column_name and column_name in row:
                        output_row[column_name] = row[column_name]
                output_data.append(output_row)

            output_df = pd.DataFrame(output_data)

            # Make sure the destination directory exists.
            output_dir = os.path.dirname(output_path)
            if output_dir and not os.path.exists(output_dir):
                os.makedirs(output_dir, exist_ok=True)

            # BOM-prefixed UTF-8 so Excel opens the file with correct encoding.
            output_df.to_csv(output_path, index=False, encoding='utf-8-sig')

            logger.info(f"数据已导出为CSV: {output_path}")
            return True, f"数据已导出为CSV: {output_path}"

        except Exception as e:
            logger.error(f"导出CSV失败: {e}")
            return False, f"导出CSV失败: {str(e)}"

    def get_duplicate_rows(self, columns: List[str]) -> pd.DataFrame:
        """获取重复行"""
        if self.df is None or not columns:
            return pd.DataFrame()

        # 验证列存在
        valid_columns = [col for col in columns if col in self.df.columns]
        if not valid_columns:
            return pd.DataFrame()

        duplicates = self.df[self.df.duplicated(subset=valid_columns, keep=False)]
        return duplicates

    def remove_duplicates(self, columns: List[str], keep: str = 'first') -> Tuple[bool, str]:
        """移除重复行"""
        try:
            if self.df is None:
                return False, "没有数据可处理"

            # 验证列存在
            valid_columns = [col for col in columns if col in self.df.columns]
            if not valid_columns:
                return False, "指定的列不存在"

            original_count = len(self.df)
            self.df = self.df.drop_duplicates(subset=valid_columns, keep=keep)
            new_count = len(self.df)
            removed_count = original_count - new_count

            logger.info(f"移除重复行: {removed_count} 行被移除")
            return True, f"移除 {removed_count} 个重复行"

        except Exception as e:
            logger.error(f"移除重复行失败: {e}")
            return False, f"移除重复行失败: {str(e)}"