import logging
import traceback
import uuid
import pickle
import os
import chardet
import pandas as pd

from typing import List
from python_calamine import CalamineWorkbook
from PySide6.QtCore import QThread, Signal
from .first_row_reader import FirstRowReader

# Module-wide logging setup.
# NOTE(review): basicConfig at import time configures the ROOT logger for the
# whole application; confirm this is intended for a library module rather than
# configuring logging once at the application entry point.
logging.basicConfig(
    format='%(asctime)s %(levelname)s %(filename)s line%(lineno)d: %(message)s',
    level=logging.INFO)
logger = logging.getLogger(__name__)


class PackReader(QThread):
    """Background worker that merges key columns from pack data files.

    Reads only the configured columns from the selected csv/xlsx files,
    concatenates them into one DataFrame, normalizes the step/status
    columns and emits the pickled result through ``file_name`` for the
    next filtering/computation stage.
    """

    # Emitted with a formatted traceback string when reading fails.
    except_signal = Signal(str)
    # Emitted with the overall completion percentage (0-100).
    progress = Signal(int)
    # Emitted with the pickled merged DataFrame.
    file_name = Signal(bytes)

    def __init__(
        self,
        selected_files: list,
        device_config: dict,
        bms_config: dict,
        sheet_name: str = "",
        file_type: str = "csv",
        parent=None
    ):
        """Pack data reader.

        Args:
            selected_files: csv/xlsx files selected for reading.
            device_config (dict): device configuration loaded from the
                configuration file.
            bms_config: column-mapping dict loaded from the BMS page.
            sheet_name: only needed when reading xlsx files.
            file_type: "csv" by default; for xlsx types the ``sheet_name``
                argument takes effect.
            parent (QObject): parent object, default None.
        """
        super().__init__(parent)
        self.selected_files = selected_files
        self.device_config = device_config
        self.bms_config = bms_config
        self.sheet_name = sheet_name
        self.file_type = file_type
        self.column_names = self._read_usecols_from_config()

    def _read_usecols_from_config(self) -> List[str]:
        """Collect the column names that must be read, from both configs."""
        use_columns = []
        # Columns the device configuration itself needs; the skipped keys
        # hold keyword settings, not column references.
        for tag, column in self.device_config.items():
            if tag in ('name', 'ch_key', 'dch_key', 'stew_key'):
                continue
            if column:
                use_columns.extend(column.split("、"))
        # Columns the BMS signal configuration needs.
        for column in self.bms_config.values():
            if column:
                use_columns.extend(column.split("、"))
        # Strip BEFORE deduplicating so entries differing only by
        # surrounding whitespace do not yield duplicate column names
        # (duplicates would break the df[self.column_names] selections).
        return list({item.strip() for item in use_columns})

    def run(self):
        """Thread entry point: dispatch on file type and report progress."""
        self.progress.emit(0)
        try:
            if self.file_type.lower() == "csv":
                self.merge_csv_columns()
            else:
                self.merge_xlsx_columns()
        except Exception:
            # Forward the full traceback to the UI, then re-raise with the
            # original traceback intact.
            self.except_signal.emit(traceback.format_exc().strip())
            raise
        self.progress.emit(100)

    def _detect_encoding(self, file_path):
        """Detect a file's text encoding with chardet.

        Args:
            file_path (str): path of the file to probe.

        Returns:
            str: the detected encoding, falling back to 'utf-8'.
        """
        try:
            # Sample the first 10000 bytes; enough for chardet in practice.
            with open(file_path, 'rb') as f:
                raw_data = f.read(10000)
            encoding = chardet.detect(raw_data)['encoding']
            if encoding is None:
                # chardet could not decide; fall back to utf-8.
                # (Fixed: was logger.warning.warn, an AttributeError.)
                logger.warning(f"chardet无法确定文件 {file_path} 的编码，将使用默认编码'utf-8'")
                return 'utf-8'
            return encoding
        except Exception as e:
            logger.warning(f"检测文件 {file_path} 编码时出错: {str(e)}，将使用默认编码'utf-8'")
            return 'utf-8'

    def merge_xlsx_columns(self):
        """Merge the configured columns of every selected xlsx file and emit."""
        data_frames = []
        sheet_name = self.sheet_name
        total_count = len(self.selected_files)
        for count, path in enumerate(self.selected_files, start=1):
            sheet_names = self.get_sheet_names(path)
            if sheet_name != "" and sheet_name not in sheet_names:
                raise ValueError(f"{path}, 缺少目标工作表{sheet_name}\n 只有表：{sheet_names}")
            analyzer = FirstRowReader(path)
            if sheet_name == "":
                raw_columns = analyzer.read_lastheader()
            else:
                raw_columns = analyzer.read_header_by_sheetname(sheet_name)
            cleaned_columns = [item for item in raw_columns if item != ""]
            # Every xlsx file must contain all of the required columns.
            missing_columns = set(self.column_names) - set(cleaned_columns)
            if missing_columns:
                raise ValueError(
                    f"文件名: {os.path.basename(path)}\n"
                    f"需要的列名: {missing_columns}\n"
                    f"已有的列名: {cleaned_columns}"
                )

            # NOTE(review): when self.sheet_name == "" the empty string is
            # passed straight to pandas — confirm this selects the intended
            # sheet (the header was read from the LAST sheet above).
            df = pd.read_excel(
                path,
                sheet_name=sheet_name,
                engine='calamine',
                usecols=self.column_names
            )
            # pd.read_excel keeps file order; re-order to the configured one.
            df = df[self.column_names]

            data_frames.append(df)
            self.progress.emit(int(count / total_count * 100.0))

        if not data_frames:
            raise ValueError("没有成功读取任何有效的xlsx文件，无法进行合并")

        self._finalize_and_emit(pd.concat(data_frames, ignore_index=True))

    def get_sheet_names(self, path):
        """Return the sheet names of the workbook at ``path``."""
        workbook = CalamineWorkbook.from_object(path)
        return workbook.sheet_names

    def merge_csv_columns(self):
        """Merge the configured columns of every selected csv file and emit."""
        data_frames = []
        total_count = len(self.selected_files)
        for count, path in enumerate(self.selected_files, start=1):
            # Detect the file encoding first.
            encoding = self._detect_encoding(path)

            # Step 1: read the header line and strip whitespace from names.
            with open(path, 'r', encoding=encoding) as f:
                first_line = f.readline().strip()
                cleaned_columns = [col.strip() for col in first_line.split(',')]
            # Only the FIRST file is checked for the required columns.
            if count == 1:
                missing_columns = set(self.column_names) - set(cleaned_columns)
                if missing_columns:
                    raise ValueError(
                        f"以下列在 column_names 中但不在数据文件中: {sorted(missing_columns)}\n"
                        f"文件名: {os.path.basename(path)}"
                    )
            # Step 2: read the file using the cleaned column names.
            df = pd.read_csv(
                path,
                usecols=self.column_names,
                encoding=encoding,
                on_bad_lines='skip',   # skip malformed rows
                header=0,              # first row holds the (replaced) header
                names=cleaned_columns  # override with the cleaned names
            )

            # Keep only the required columns, in configured order.
            df = df[self.column_names]

            data_frames.append(df)
            self.progress.emit(int(count / total_count * 100.0))

        if not data_frames:
            raise ValueError("没有成功读取任何有效的CSV文件，无法进行合并")

        self._finalize_and_emit(pd.concat(data_frames, ignore_index=True))

    def _finalize_and_emit(self, df: pd.DataFrame) -> None:
        """Shared tail of both merge paths: build status, rename, emit.

        Extracted because merge_csv_columns and merge_xlsx_columns
        previously duplicated this code verbatim.
        """
        # Build the raw '状态' column from the configured status column(s).
        status_columns = self.device_config['status'].split("、")
        df = df.rename(columns={self.device_config['step']: '工步'})
        if len(status_columns) == 1:
            df = df.rename(columns={status_columns[0]: '状态'})
        else:
            # Several configured columns: concatenate their text values.
            df['状态'] = df[status_columns].astype(str).agg(''.join, axis=1)

        # Rename columns according to the BMS signals, then classify rows.
        self.rename_df(df)
        self.add_status(df)
        self.file_name.emit(pickle.dumps(df))

    def add_status(self, df) -> None:
        """Derive a charge/discharge/rest ``status`` column.

        The raw run-mode / step text does not directly say whether a row is
        charging or discharging, so match the configured keywords instead.
        """
        charge_keys = self.device_config['ch_key'].split("、")
        discharge_keys = self.device_config['dch_key'].split("、")
        stew_keys = self.device_config['stew_key'].split("、")
        status_text = df["状态"].astype(str)
        df.loc[status_text.str.contains('|'.join(charge_keys)), 'status'] = "充电"
        df.loc[status_text.str.contains('|'.join(discharge_keys)), 'status'] = "放电"
        df.loc[status_text.str.contains('|'.join(stew_keys)), 'status'] = "静置"

    def rename_df(self, df: pd.DataFrame):
        """Rename raw columns to their canonical names from both configs.

        When several targets map to the same raw column, the first target
        renames the column in place and every later target becomes an
        explicit copy of it (fixes the previous broken ``next(...)`` lookup,
        which could raise StopIteration, and the silent drop of BMS targets
        whose raw column was already consumed by a device mapping).
        """
        # raw column name -> canonical name it was first renamed to
        first_target = {}

        def _map(original_col, target_col):
            # Rename once; afterwards copy from the already-renamed column.
            if original_col not in first_target:
                df.rename(columns={original_col: target_col}, inplace=True)
                first_target[original_col] = target_col
            else:
                # Explicit copy so the new column is not a view of the old.
                df[target_col] = df[first_target[original_col]].copy()

        # 1. device_config mappings
        for original_col, target_col in (
            (self.device_config['step'], "工步"),
            (self.device_config['ch_capacity'], "充电容量"),
            (self.device_config['dch_capacity'], "放电容量"),
            (self.device_config['ch_energy'], "充电能量"),
            (self.device_config['dch_energy'], "放电能量"),
        ):
            _map(original_col, target_col)

        # 2. bms_config mappings
        for target_col, original_col in self.bms_config.items():
            if original_col not in first_target and original_col not in df.columns:
                # The raw column never appeared in the data; nothing to map.
                continue
            _map(original_col, target_col)

    @staticmethod
    def generate_random_filename(extension=None):
        """Generate a random (uuid4-based) file name.

        Args:
            extension (str): file extension such as 'txt' or 'jpg'.

        Returns:
            str: the randomly generated file name.
        """
        random_name = str(uuid.uuid4())
        if not extension:
            return random_name
        # Make sure the extension starts with a dot.
        if not extension.startswith('.'):
            extension = '.' + extension
        return random_name + extension
