import pandas as pd
import numpy as np
from typing import Optional, Union, List, Dict, Any
import logging
from pathlib import Path
import modin.pandas as mpd  # 用于并行处理
import dask.dataframe as dd  # 用于处理超大数据集
import pyarrow as pa  # 用于高效内存管理
from concurrent.futures import ThreadPoolExecutor
import gc

class ExcelReader:
    """高性能Excel大数据读取器"""

    def __init__(
        self,
        chunk_size: int = 100000,
        use_modin: bool = False,
        use_dask: bool = False,
        memory_limit: str = '4GB'
    ):
        """Initialize the Excel reader.

        Args:
            chunk_size: Number of rows to read per chunk.
            use_modin: Whether to use modin for parallel reading.
            use_dask: Whether to use dask for very large datasets.
            memory_limit: Memory limit hint (stored but not consumed by any
                method visible here — presumably for downstream use; verify).
        """
        # Plain attribute assignments; the logger comes last since it is
        # built by a helper rather than passed in.
        self.chunk_size = chunk_size
        self.use_modin = use_modin
        self.use_dask = use_dask
        self.memory_limit = memory_limit
        self.logger = self._setup_logger()

    def _setup_logger(self) -> logging.Logger:
        """Return the module logger, attaching a stream handler at most once.

        ``logging.getLogger(__name__)`` returns the same shared logger object
        on every call, so the original unconditional ``addHandler`` attached
        one more StreamHandler per ``ExcelReader`` instance, duplicating every
        log line. The handler is now added only if none is present yet.

        Returns:
            Configured ``logging.Logger`` for this module.
        """
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)
        # Guard against duplicate handlers when multiple readers are created.
        if not logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            )
            logger.addHandler(handler)
        return logger

    def read_excel_chunks(
        self,
        file_path: Union[str, Path],
        sheet_name: Optional[str] = None,
        usecols: Optional[List[str]] = None,
        dtype: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> pd.DataFrame:
        """Read an Excel sheet in chunks and return one concatenated DataFrame.

        Args:
            file_path: Path to the Excel file.
            sheet_name: Worksheet name; defaults to the first sheet.
            usecols: Columns to read.
            dtype: Column dtypes to apply.
            **kwargs: Extra arguments forwarded to ``pd.read_excel``.

        Returns:
            DataFrame with all chunks concatenated (an empty DataFrame for an
            empty sheet).

        Raises:
            Exception: re-raised after logging when reading fails.
        """
        try:
            chunks: List[pd.DataFrame] = []

            with pd.ExcelFile(file_path) as xls:
                if sheet_name is None:
                    sheet_name = xls.sheet_names[0]

                self.logger.info(f"开始读取文件: {file_path}")

                # Read successive windows of ``chunk_size`` rows until the
                # sheet is exhausted. The previous implementation pre-computed
                # the row count via ``xls.book.sheet_by_name(...).nrows`` —
                # that is the legacy xlrd API and raises AttributeError under
                # the openpyxl engine pandas uses for .xlsx files. Stopping on
                # a short/empty chunk needs no engine internals at all.
                start_row = 0
                while True:
                    chunk = pd.read_excel(
                        xls,
                        sheet_name=sheet_name,
                        # Keep row 0 (the header); skip already-consumed data rows.
                        skiprows=range(1, start_row + 1) if start_row > 0 else None,
                        nrows=self.chunk_size,
                        usecols=usecols,
                        dtype=dtype,
                        **kwargs
                    )

                    # Keep the very first chunk even if empty so the returned
                    # frame preserves column names for an empty sheet.
                    if chunk.empty and start_row > 0:
                        break
                    chunks.append(chunk)
                    start_row += len(chunk)
                    self.logger.info(f"已读取 {start_row} 行")

                    # Encourage prompt release of per-chunk parsing garbage.
                    gc.collect()

                    # A short chunk means the sheet is exhausted.
                    if len(chunk) < self.chunk_size:
                        break

            # Guard: pd.concat([]) raises ValueError.
            df = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
            self.logger.info("文件读取完成")
            return df

        except Exception as e:
            self.logger.error(f"读取Excel文件失败: {str(e)}")
            raise

    def read_excel_dask(
        self,
        file_path: Union[str, Path],
        **kwargs
    ) -> dd.DataFrame:
        """Read a very large Excel file through dask.

        The sheet is first materialised via ``read_excel_chunks`` and written
        to a parquet file next to the working directory, which dask then
        reads lazily.

        Args:
            file_path: Path to the Excel file.
            **kwargs: Extra arguments forwarded to ``read_excel_chunks``.

        Returns:
            A dask DataFrame backed by the temporary parquet file.

        Raises:
            Exception: re-raised after logging when conversion/reading fails.
        """
        try:
            # NOTE(review): this file is not deleted — the returned dask
            # frame reads it lazily, so removing it here would break callers.
            temp_parquet = f"{Path(file_path).stem}_temp.parquet"

            self.logger.info("转换Excel为parquet格式...")
            frame = self.read_excel_chunks(file_path, **kwargs)
            frame.to_parquet(temp_parquet)

            lazy_frame = dd.read_parquet(temp_parquet)
            self.logger.info("文件读取完成")
            return lazy_frame

        except Exception as e:
            self.logger.error(f"使用dask读取文件失败: {str(e)}")
            raise

    def read_excel_modin(
        self,
        file_path: Union[str, Path],
        **kwargs
    ) -> mpd.DataFrame:
        """Read an Excel file in parallel using modin.

        Args:
            file_path: Path to the Excel file.
            **kwargs: Extra arguments forwarded to ``mpd.read_excel``.

        Returns:
            A modin DataFrame.

        Raises:
            Exception: re-raised after logging when reading fails.
        """
        try:
            self.logger.info("使用modin并行读取文件...")
            result = mpd.read_excel(file_path, **kwargs)
        except Exception as e:
            self.logger.error(f"使用modin读取文件失败: {str(e)}")
            raise
        return result

    def optimize_dtypes(self, df: pd.DataFrame) -> pd.DataFrame:
        """Shrink column dtypes to reduce memory usage.

        Object columns whose distinct-value ratio is below 0.5 become
        ``category``; integer columns are narrowed to the smallest (u)int
        type whose full range covers the data; float columns are downcast
        via ``pd.to_numeric``.

        Note: columns are reassigned on the input frame in place, and the
        same frame is returned.

        Args:
            df: Input DataFrame.

        Returns:
            The DataFrame with optimized dtypes.
        """
        for col in df.columns:
            col_type = df[col].dtype

            if col_type == 'object':
                n_rows = len(df[col])
                # Guard against ZeroDivisionError on an empty frame; convert
                # to category only when values repeat enough to pay off.
                if n_rows > 0 and df[col].nunique() / n_rows < 0.5:
                    df[col] = df[col].astype('category')

            elif col_type.name.startswith('int'):
                if df[col].empty:
                    continue
                c_min = df[col].min()
                c_max = df[col].max()

                # Use exact type limits from np.iinfo. The previous strict
                # comparisons (e.g. ``c_max < 255``, ``c_min > -128``)
                # wrongly excluded the boundary values themselves: uint8
                # holds 0..255 inclusive, int8 holds -128..127 inclusive.
                if c_min >= 0:
                    for utype in (np.uint8, np.uint16, np.uint32):
                        if c_max <= np.iinfo(utype).max:
                            df[col] = df[col].astype(utype)
                            break
                else:
                    for itype in (np.int8, np.int16, np.int32):
                        info = np.iinfo(itype)
                        if info.min <= c_min and c_max <= info.max:
                            df[col] = df[col].astype(itype)
                            break

            elif col_type.name.startswith('float'):
                # Downcast floats to the smallest type that holds the values.
                df[col] = pd.to_numeric(df[col], downcast='float')

        return df

    def get_memory_usage(self, df: pd.DataFrame) -> str:
        """Return the DataFrame's deep memory footprint as an 'X.XX MB' string."""
        total_bytes = df.memory_usage(deep=True).sum()
        megabytes = total_bytes / 1024 / 1024
        return f"{megabytes:.2f} MB"
