import logging
import traceback
import os
import pandas as pd
import numpy as np

from datetime import datetime
from PySide6.QtCore import QThread, Signal
from utils.statics import stastics

# Module-wide logging: timestamp, level, source file and line per record.
logging.basicConfig(
    format='%(asctime)s %(levelname)s %(filename)s line%(lineno)d: %(message)s',
    level=logging.INFO)
logger = logging.getLogger(__name__)

# Name of the local directory for generated data files.
OUTPUT_DIR = "data"

class DataProcessor(QThread):
    """Worker thread that computes per-cycle battery-pack metrics.

    Splits the merged test DataFrame into big (and optionally small)
    cycles, evaluates the configured indicators for each cycle, and
    writes the result to an .xlsx file via BatteryCycleDataWriter.
    """

    # Emitted with the last traceback line when start_work() fails.
    except_signal = Signal(str)
    # Emitted with an int in [0, 100] to drive a progress indicator.
    progress = Signal(int)

    def __init__(
        self, df: pd.DataFrame, device_config: dict, ouput_indicats: list, small_cycle: list, parent=None
    ):
        """Pack data reader.

        Args:
            df: DataFrame produced by the first merge step; typically a few
                MB up to ~10 MB, never very large.
            device_config: configuration dict loaded from the config file.
            ouput_indicats: list of indicator config dicts (the original
                misspelled parameter name is kept for caller compatibility).
            small_cycle: small-cycle definitions per work step; an all-NaN
                table means no small cycles are configured.
            parent (QObject): parent object, defaults to None.

        Only the key columns are extracted from the many source CSV files;
        the merged result is written into the local data directory and the
        file name feeds the next filtering/calculation step.
        """
        super().__init__(parent)
        self.df = df
        self.device_config = device_config
        self.small_cycle = small_cycle
        # Re-group the indicator configs (base, charge, discharge) so the
        # output columns appear in a stable, grouped order.
        self.ouput_indicats = self.sort_outputs(ouput_indicats)
        # Total number of big cycles; updated by split_df() and used to
        # scale progress emission.
        self.max_cycle = 1

    @staticmethod
    def sort_outputs(ouput_indicats: list):
        """Group indicator configs into base/charge/discharge and sort.

        Charge and discharge groups are sorted by display name; base
        columns keep their original order. Returns the re-ordered list of
        the original config dicts.
        """
        base_columns = []
        charge_columns = []
        discharge_columns = []

        # Columns whose display name must be disambiguated by the
        # aggregation method (e.g. "SOC_截止值").
        method_suffixed = {
            "SOC", "最大温度", "最小温度", "最大电压", "最小电压",
            "最大电压节数", "最小电压节数",
        }

        for item in ouput_indicats:
            column_name = item["column"]
            status = item["status"]
            method = item["method"]

            if column_name in method_suffixed:
                full_name = f"{column_name}_{method}"
            else:
                full_name = column_name

            if status == "充电":
                charge_columns.append({"config": item, "full_name": full_name})
            elif status == "放电":
                discharge_columns.append({"config": item, "full_name": full_name})
            else:
                base_columns.append({"config": item, "full_name": full_name})

        # Sort the charge/discharge groups by display name, then concatenate.
        charge_columns.sort(key=lambda x: x["full_name"])
        discharge_columns.sort(key=lambda x: x["full_name"])
        total_columns = base_columns + charge_columns + discharge_columns
        return [item["config"] for item in total_columns]

    def get_cycles(self):
        """Yield one computed metric row per big cycle, emitting progress."""
        count = 0
        # Derive any "col1-col2" difference columns before splitting.
        self.progress.emit(2)
        self.calculate_df()
        self.progress.emit(10)
        for _, group, former_lastrow in self.split_df():
            count += 1
            # Cycle processing covers the 10%..100% progress range.
            self.progress.emit(int(count / self.max_cycle * 90.0 + 10))
            yield self.calculate_metric(former_lastrow, group)

    def calculate_metric(self, former_lastrow, df):
        """Compute every configured indicator for one big cycle.

        Args:
            former_lastrow: last row of the previous cycle, used by the
                difference-based methods when a group's first row has no
                predecessor inside the current cycle.
            df: raw data of the current big cycle.

        Returns:
            list of scalars, one per entry in self.ouput_indicats
            (NaN where no matching data exists).
        """
        return [
            self.calculate_single_metric(df, config, former_lastrow)
            for config in self.ouput_indicats
        ]

    def calculate_single_metric(self, df: pd.DataFrame, config, former_last_row):
        """Compute one indicator value from the current cycle's data.

        Args:
            df: raw data of the current big cycle.
            config: single indicator config dict. Example::

                {
                    "column": "充电容量",
                    "small_cycle": [1],  # [] = big cycle; else small-cycle indices
                    "status": "充电",
                    "work_step": [],
                    "method": "截止值"
                }

            former_last_row: last row of the previous cycle, the fallback
                predecessor row for the difference-based methods.

        Returns:
            The computed scalar, or NaN when no data matches the filters
            or the method name is unknown.
        """
        step_column = "工步"
        target_column = config['column']

        # 1.1 Filter by charge/discharge status ("全部" keeps everything).
        filtered_df = df.copy()
        if config['status'] != "全部":
            filtered_df = filtered_df[filtered_df['status'] == config['status']]

        # 1.2 Filter by work step when the config lists specific steps.
        if config['work_step']:
            filtered_df = filtered_df[filtered_df[step_column].isin(config['work_step'])]

        # 1.3 Filter by small cycle. The config value is a LIST of
        # small-cycle indices, so membership (isin) is required —
        # comparing the column with == against a list is incorrect.
        if config['small_cycle']:
            filtered_df = filtered_df[filtered_df['small_cycle'].isin(config['small_cycle'])]

        target_series = filtered_df[target_column]
        if target_series.empty:
            return np.nan

        # Dispatch on the aggregation method.
        method = config['method']
        if method == '起始值':
            return target_series.iloc[0]
        if method == '截止值':
            return target_series.iloc[-1]
        if method == '最大值':
            return target_series.max()
        if method == '最小值':
            return target_series.min()
        if method == '行末累加':
            return self.sum_tail_values(df, step_column, target_column)
        if method == '差值累加':
            return self.sum_range_diffs(
                df, filtered_df, step_column, target_column, former_last_row
            )
        if method == "差值 (最后一行 - 第一行前一行)":
            return self.diff_last(df, filtered_df, target_column, former_last_row)
        return np.nan

    @staticmethod
    def diff_last(source_df, filtered_df, target_column, former_last_row):
        """Difference: last filtered row minus the row before the first one.

        When the first filtered row has no predecessor inside source_df,
        former_last_row (the previous cycle's last row) is used instead.
        Assumes a contiguous integer index (predecessor = index - 1).
        """
        first_index = filtered_df.index[0]
        prev_index = first_index - 1
        if prev_index >= source_df.index[0]:
            first_row = source_df.loc[prev_index, :]
        else:
            first_row = former_last_row
        last_row = filtered_df.iloc[-1]
        return last_row[target_column] - first_row[target_column]

    @staticmethod
    def sum_tail_values(df: pd.DataFrame, step_column: str, target_column: str) -> float:
        """Tail accumulation: sum the last value of each work-step group.

        Args:
            df: input DataFrame.
            step_column: column to group by.
            target_column: column whose per-group last value is summed.

        Returns:
            float: sum of the last target_column value of every group.
        """
        # Last target value per step group, then the sum over all groups.
        last_values = df.groupby(step_column)[target_column].last()
        return last_values.sum()

    def calculate_df(self):
        """Materialize derived columns named like "col1-col2" as differences."""
        df = self.df
        all_columns = set(df.columns)
        calculated = set()  # avoid computing the same derived column twice

        for item in self.ouput_indicats:
            target_col = item['column']
            if target_col in calculated:
                continue

            # Only names containing a minus sign denote derived columns.
            if '-' not in target_col:
                continue

            # Exactly two operands are supported; anything else yields NaN.
            cols = target_col.split('-')
            if len(cols) != 2:
                df[target_col] = np.nan
                continue
            col1, col2 = cols

            # Both operands must exist and be numeric.
            if col1 in all_columns and col2 in all_columns:
                if pd.api.types.is_numeric_dtype(df[col1]) and pd.api.types.is_numeric_dtype(df[col2]):
                    df[target_col] = df[col1] - df[col2]
                    calculated.add(target_col)
                    continue

            # Unresolvable derived column: fill with NaN.
            df[target_col] = np.nan

    @staticmethod
    def sum_range_diffs(
        source_df: pd.DataFrame,
        df: pd.DataFrame,
        step_column: str,
        target_column: str,
        former_last_row: pd.Series
    ) -> float:
        """Difference accumulation over work-step groups.

        For each group, add (last value - value of the row preceding the
        group's first row). The predecessor comes from source_df when it
        exists there, otherwise from former_last_row.

        Args:
            source_df: raw, unfiltered source DataFrame.
            df: filtered DataFrame to group.
            step_column: column to group by.
            target_column: column whose differences are accumulated.
            former_last_row: fallback row when a group's first row has no
                predecessor in source_df.

        Returns:
            float: sum of all per-group differences.
        """
        total_sum = 0.0

        for _, group in df.groupby(step_column):
            first_index = group.index[0]
            prev_index = first_index - 1

            if prev_index >= source_df.index[0]:
                # Predecessor row exists in the raw source data.
                prev_value = source_df.loc[prev_index, target_column]
            else:
                # Otherwise fall back to the previous cycle's last row.
                prev_value = former_last_row[target_column]

            last_value = group[target_column].iloc[-1]
            total_sum += (last_value - prev_value)
        return total_sum

    def split_df(self):
        """Split the data into per-big-cycle groups and yield them.

        Yields (cycle_number, group, former_lastrow) where former_lastrow
        is the last row of the previously yielded cycle (an all-zero
        placeholder Series for the first cycle).
        """
        df = self.df
        work_step_df = pd.DataFrame(self.small_cycle)
        if work_step_df.isna().all().all():
            # No small cycles configured: big cycles are detected directly
            # from changes in the device's cycle-number columns.
            cycle_columns = self.device_config['cycle_num'].split("、")
            # Keep a small_cycle column (all-NaN) so downstream filters
            # do not fail on a missing column.
            df['small_cycle'] = np.nan
            prev_values = df[cycle_columns].shift(1)
            df['cycle_num'] = (prev_values.ne(
                df[cycle_columns])).any(axis=1).cumsum()
        else:
            # Small cycles configured: derive a running work-step counter,
            # then map it onto big and small cycle numbers.
            bins = work_step_df[work_step_df['big_cycle'].shift(1) != work_step_df['big_cycle']].index.tolist()
            prev_values = df[["工步"]].shift(1)
            df['work_step'] = (prev_values.ne(df[['工步']])).any(axis=1).cumsum()
            # Big cycle: bucket the work-step counter by the boundaries
            # where big_cycle changes in the configuration.
            indices = np.digitize(df['work_step'], bins, right=False)
            df['cycle_num'] = indices
            # Small cycle: map work steps to small-cycle numbers.
            df = self.add_cycle_number(work_step_df, df, "small_cycle")
        self.max_cycle = df['cycle_num'].iloc[-1]
        # Placeholder "previous last row" for the very first cycle.
        former_lastrow = pd.Series(0, index=df.columns.tolist())
        for counter, group in df.groupby('cycle_num'):
            yield counter, group, former_lastrow
            former_lastrow = group.iloc[-1]

    @staticmethod
    def add_cycle_number(df1, df2, cycle_col='循环序号'):
        """Map cycle numbers from df1 onto df2 via the work-step counter.

        Assumes df1's work-step number equals its index + 1.

        Args:
            df1 (pd.DataFrame): cycle-number table; index + 1 = work step.
            df2 (pd.DataFrame): target DataFrame containing a 'work_step'
                column.
            cycle_col (str): name of the cycle-number column; defaults to
                '循环序号'.

        Returns:
            pd.DataFrame: df2 with the cycle-number column added (NaN for
            unmapped work steps).
        """
        # Mapping series keyed by work-step number.
        mapping_series = pd.Series(df1[cycle_col].values, index=df1.index + 1)
        df2[cycle_col] = df2['work_step'].map(mapping_series)
        return df2

    @stastics(name="Pack离线通用数据处理脚本")
    def start_work(self):
        """Run the full export: header, per-cycle data rows, save and open.

        Returns an int (row count / 100000) consumed by the @stastics
        decorator. On failure the last traceback line is emitted through
        except_signal and the exception is re-raised.
        """
        try:
            from utils.battery_cycle_data_writer import BatteryCycleDataWriter
            battery_writer = BatteryCycleDataWriter(self.ouput_indicats)
            battery_writer.create_header()
            battery_writer.set_column_widths()
            # Data rows start at spreadsheet row 4; the first output column
            # is the 1-based cycle counter.
            for cycle, row_data in enumerate(self.get_cycles(), 1):
                battery_writer.add_data_row([cycle, *row_data], cycle + 3)
            # Save into the local data directory (see OUTPUT_DIR) as the
            # class docstring promises; create it if missing.
            os.makedirs(OUTPUT_DIR, exist_ok=True)
            file_name = datetime.now().strftime("%Y-%m-%d_%H-%M")
            dst_filename = os.path.join(OUTPUT_DIR, f"data_{file_name}.xlsx")
            battery_writer.save(dst_filename)
            os.startfile(dst_filename)
        except Exception:
            # Forward only the last traceback line to the UI, then
            # re-raise preserving the original traceback.
            error_message = traceback.format_exc().strip().splitlines()[-1]
            self.except_signal.emit(error_message)
            raise
        return int(self.df.shape[0] / 100000)

    def run(self):
        """QThread entry point: wrap start_work with 0%/100% progress."""
        self.progress.emit(0)
        self.start_work()
        self.progress.emit(100)
