import traceback
from datetime import datetime
from pathlib import Path
import pandas as pd
from PyQt6.QtCore import QThread, pyqtSignal
from PyQt6.QtWidgets import QApplication  # needed by the standalone debug entry point below

# Key columns that identify a product row when joining report versions.
CRITICAL_COLUMNS = ['odm_desc', 'product_code']
# Descriptive columns carried along for reference; merged in via merge_metadata,
# never used as join keys on their own.
AUXILIARY_COLUMNS = ['ssd_family_name', 'mm_number', 'ssd_name']


class ProcessThread(QThread):
    """Worker thread that diffs weekly PRF spreadsheets and writes Excel reports.

    Running in default mode produces three files under ``params['output_path']``:
      * a weekly delta report (one sheet per ODM, week-by-week changes),
      * a monthly comparison report (one sheet per ODM, month-level pivot),
      * a vertical merge of every registered input file.

    Progress and diagnostics are surfaced through Qt signals so the GUI thread
    never blocks on the file processing.
    """

    finished = pyqtSignal(int, str)      # (return code: 0 = success, message)
    progress_updated = pyqtSignal(int)   # progress percentage
    log_updated = pyqtSignal(str, str)   # (log level, log message)

    def __init__(self, data_map, params):
        """
        Args:
            data_map: mapping of week number (e.g. 202516) to input file name.
            params: options dict; expects the keys 'mode', 'base_week',
                'comparison_week', 'input_path' and 'output_path'.
        """
        super().__init__()
        self.data_map = data_map
        self.params = params

    def run(self):
        """Thread entry point: run the pipeline and emit the completion signal."""
        try:
            self.log_updated.emit("INFO", "Starting data processing thread...")
            self.log_updated.emit("DEBUG", "Thread initialization complete. Starting data processing...")
            return_code, return_msg = self.process_data()
            # Only announce success when the pipeline actually reported it.
            if return_code == 0:
                self.log_updated.emit("INFO", "Data processing completed successfully.")
            self.log_updated.emit("DEBUG", "Thread execution finished. Emitting completion signal...")
            self.finished.emit(return_code, return_msg)
        except Exception as e:
            self.log_updated.emit("ERROR", f"An error occurred: {str(e)}")
            self.log_updated.emit("DEBUG", f"Exception details: {traceback.format_exc()}")
            self.finished.emit(1, f"Failed: {e}")

    def process_data(self):
        """Dispatch to the processing mode selected by params['mode'].

        Returns:
            (return_code, message): 0 means success, non-zero means failure.
        """
        if self.params['mode'] == 0:
            self.log_updated.emit("INFO", "Processing data in default mode...")
            self.log_updated.emit("DEBUG", "Entering default processing mode...")
            return self.process_data_mode_default()
        else:
            self.log_updated.emit("INFO", "Processing data in user-defined mode...")
            self.log_updated.emit("DEBUG", "Entering user-defined processing mode...")
            return self.process_data_mode_user_defined()

    def process_data_mode_default(self):
        """Run the default pipeline: weekly deltas, monthly report, merged dump.

        Returns:
            (0, "Success") on success, (1, message) on any failure.
        """
        try:
            base_week = self.params['base_week']
            comparison_week = self.params['comparison_week']
            input_dir = Path(self.params['input_path'])
            # Fail fast with a clear message when a week has no registered file.
            for week in (base_week, comparison_week):
                if self.data_map.get(week) is None:
                    raise KeyError(f"No input file registered for week {week}")
            base_data_path = input_dir / self.data_map[base_week]
            comparison_data_path = input_dir / self.data_map[comparison_week]
            datetime_str = datetime.now().strftime("%Y%m%d-%H%M%S")

            week_output_path = Path(self.params['output_path']) / f"output(Week)-{datetime_str}.xlsx"
            month_output_path = Path(self.params['output_path']) / f"output(Month)-{datetime_str}.xlsx"
            self.log_updated.emit("INFO", f"Processing PRF changes from {base_week} to {comparison_week}...")
            self.log_updated.emit("DEBUG",
                                  f"Base data path: {base_data_path}, Comparison data path: {comparison_data_path}")
            self.process_prf_changes(base_data_path, comparison_data_path, str(comparison_week), week_output_path)
            self.log_updated.emit("INFO", "Processing monthly report...")
            self.log_updated.emit("DEBUG",
                                  f"Processing monthly report parameters:{base_data_path}-{comparison_data_path}-{base_week}-{comparison_week}-{month_output_path}")
            self.process_monthly_report(base_data_path, comparison_data_path, str(base_week), str(comparison_week),
                                        month_output_path)
            self.log_updated.emit("INFO", "Merging Excel files vertically...")
            self.merge_excel_vertically()
            self.log_updated.emit("INFO", "PRF changes processed successfully.")
            self.log_updated.emit("DEBUG", "Default mode processing completed successfully.")
            return 0, "Success"
        except Exception as e:
            self.log_updated.emit("ERROR", f"An error occurred in process_data_mode_default: {str(e)}")
            self.log_updated.emit("DEBUG", f"Exception details: {traceback.format_exc()}")
            return 1, f"Failed: {e}"

    def process_prf_changes(self,
                            baseline_path: Path,   # baseline file (e.g. 202516_2_PRF.xlsx)
                            updated_path: Path,    # updated file (e.g. 202519.2_PRF.xlsx)
                            start_week: str,       # first week of the window, "YYYYWW"
                            output_path: Path = Path("output.xlsx")
                            ) -> None:
        """Compute week-by-week production-plan deltas between two PRF versions.

        Writes one sheet per ODM containing eight weekly delta columns starting
        at ``start_week``, a total DELTA column, and a trailing TOTAL summary row.

        Args:
            baseline_path: path of the baseline spreadsheet.
            updated_path: path of the updated spreadsheet.
            start_week: first week of the comparison window (format YYYYWW).
            output_path: destination .xlsx file.

        Raises:
            ValueError: if the merged data has duplicate odm_desc+mm_number keys.
        """
        self.log_updated.emit("INFO", f"Reading baseline data from {baseline_path}...")
        self.log_updated.emit("DEBUG", f"Attempting to read baseline data from: {baseline_path}")
        df_baseline = pd.read_excel(baseline_path)
        self.log_updated.emit("INFO", f"Reading updated data from {updated_path}...")
        self.log_updated.emit("DEBUG", f"Attempting to read updated data from: {updated_path}")
        df_updated = pd.read_excel(updated_path)

        # Eight consecutive ISO week labels starting at start_week.
        week_columns = self._generate_week_sequence(start_week)
        self.log_updated.emit("DEBUG", f"Generated week columns: {week_columns}")

        # Merge both versions on the composite key (odm_desc, mm_number).
        # NOTE(review): assumes every week column exists in BOTH files so the
        # merge suffixes them as <week>_baseline / <week>_updated — confirm.
        merge_keys = ['odm_desc', 'mm_number']
        self.log_updated.emit("INFO", "Merging baseline and updated data...")
        self.log_updated.emit("DEBUG", f"Merge keys: {merge_keys}")
        df_merged = pd.merge(
            df_baseline,
            df_updated,
            on=merge_keys,
            suffixes=('_baseline', '_updated'),
            how='outer'
        )
        # Key sanity check: duplicates here mean a non-unique key in an input file.
        if df_merged[merge_keys].duplicated().any():
            raise ValueError("发现重复的odm_desc+mm_number组合")
        self.log_updated.emit("DEBUG", f"Merged data shape: {df_merged.shape}")

        # Per-week change (updated - baseline), treating missing values as 0.
        delta_data = []
        for week in week_columns:
            baseline_col = f"{week}_baseline"
            updated_col = f"{week}_updated"
            df_merged[baseline_col] = df_merged[baseline_col].fillna(0)
            df_merged[updated_col] = df_merged[updated_col].fillna(0)
            df_merged[week] = df_merged[updated_col] - df_merged[baseline_col]
            delta_data.append(week)
            self.log_updated.emit("DEBUG", f"Calculated delta for week: {week}")

        # Keep identifying fields plus the delta columns, restoring plain names.
        # The merge keys are unsuffixed; product_code comes from the baseline
        # side (NaN for rows present only in the updated file — TODO confirm
        # that is acceptable downstream).
        keep_columns = ['odm_desc', 'mm_number', 'product_code_baseline',
                        'ssd_family_name_updated', 'ssd_name_updated'] + delta_data
        self.log_updated.emit("INFO", "Extracting key fields and calculating deltas...")
        self.log_updated.emit("DEBUG", f"Keeping columns: {keep_columns}")
        df_delta = df_merged[keep_columns].rename(columns={
            'product_code_baseline': 'product_code',
            'ssd_family_name_updated': 'ssd_family_name',
            'ssd_name_updated': 'ssd_name'
        })
        self.log_updated.emit("DEBUG", f"Renamed columns: {df_delta.columns.tolist()}")

        # Total change across the whole window.
        self.log_updated.emit("INFO", "Calculating total delta for each row...")
        self.log_updated.emit("DEBUG", f"Delta columns: {delta_data}")
        df_delta["DELTA"] = df_delta[delta_data].sum(axis=1)
        self.log_updated.emit("DEBUG", f"Total DELTA calculated. Shape: {df_delta.shape}")

        # Fixed presentation order for the output sheets.
        columns_order = ['odm_desc', 'ssd_family_name', 'mm_number', 'product_code', 'ssd_name', 'DELTA'] + week_columns
        df_delta = df_delta[columns_order]
        self.log_updated.emit("DEBUG", f"Final column order: {columns_order}")

        # One sheet per ODM, each with a trailing TOTAL summary row.
        self.log_updated.emit("INFO", f"Writing output to {output_path}...")
        self.log_updated.emit("DEBUG", f"Output path: {output_path}")
        with pd.ExcelWriter(output_path, engine="xlsxwriter") as writer:
            for group_name, group_data in df_delta.groupby("odm_desc"):
                summary = group_data[delta_data + ["DELTA"]].sum()
                summary_row = pd.DataFrame(
                    [summary.values],
                    columns=delta_data + ["DELTA"]
                )
                summary_row.insert(0, "odm_desc", "TOTAL")
                final_df = pd.concat([group_data, summary_row], ignore_index=True)

                # Excel caps sheet names at 31 characters.
                sheet_name = str(group_name)[:31]
                final_df.to_excel(writer, sheet_name=sheet_name, index=False)

                # Negative numbers render red and parenthesised.
                workbook = writer.book
                worksheet = writer.sheets[sheet_name]
                red_format = workbook.add_format({"num_format": "#,##0;[Red](#,##0);0"})
                for col in delta_data + ["DELTA"]:
                    col_idx = final_df.columns.get_loc(col)
                    worksheet.set_column(col_idx, col_idx, None, red_format)
        self.log_updated.emit("DEBUG", f"Excel file written successfully to: {output_path}")

    @staticmethod
    def _generate_week_sequence(start_week: str) -> list:
        """Generate eight consecutive ISO week labels (YYYYWW), handling year ends.

        ISO years contain 52 or 53 weeks; December 28 always falls in the last
        ISO week of its year, so its week number gives the year's week count.
        """
        year = int(start_week[:4])
        week = int(start_week[4:])
        weeks = []
        for _ in range(8):
            weeks.append(f"{year}{week:02d}")
            week += 1
            if week > datetime(year, 12, 28).isocalendar()[1]:
                year += 1
                week = 1
        return weeks

    @staticmethod
    def _excel_col_name(col_idx: int) -> str:
        """Convert a 0-based column index to an Excel column letter (A..Z, AA..)."""
        name = ''
        col_idx += 1
        while col_idx:
            col_idx, rem = divmod(col_idx - 1, 26)
            name = chr(65 + rem) + name
        return name

    @staticmethod
    def calculate_density(ssd_family_name: str, ssd_name: str) -> str:
        """Derive the capacity ("Density") token from an SSD name.

        The last six characters of ssd_family_name act as a separator splitting
        ssd_name; the remainder is expected to look like "<capacity>B <series> ...".
        Returns '' whenever the pattern does not match or an input is missing.
        """
        if pd.isna(ssd_family_name) or pd.isna(ssd_name):
            return ''
        # Separator: the trailing six characters of the family name.
        separator = ssd_family_name[-6:].strip()
        parts = ssd_name.split(separator, 1)
        if len(parts) < 2:
            return ''
        part2 = parts[1].strip().split('B ')
        if len(part2) >= 2:
            return part2[0] + 'B'
        return ''

    @staticmethod
    def calculate_series(ssd_family_name: str, ssd_name: str) -> str:
        """Derive the "Series" token from an SSD name (see calculate_density).

        Returns '' whenever the pattern does not match or an input is missing.
        """
        if pd.isna(ssd_family_name) or pd.isna(ssd_name):
            return ''
        separator = ssd_family_name[-6:].strip()
        parts = ssd_name.split(separator, 1)
        if len(parts) < 2:
            return ''
        part2 = parts[1].strip().split('B ')
        # Guard against names without a "<capacity>B " segment; the previous
        # version indexed part2[1] unconditionally and raised IndexError here.
        if len(part2) < 2:
            return ''
        return part2[1].split(' ')[0]

    def process_data_mode_user_defined(self):
        """User-defined processing mode (not implemented yet).

        Returns:
            (1, message) so run() reports a clean failure instead of crashing
            while unpacking a None return value.
        """
        self.log_updated.emit("ERROR", "User-defined mode is not implemented.")
        return 1, "Failed: user-defined mode is not implemented"

    def merge_excel_vertically(self):
        """Stack every registered input file into one sheet, aligned by column name.

        A leading 'Week' column is added, taken from the first six characters of
        each file's stem; columns missing from a file are filled with 0. The
        result is written to the output directory as prf_merge-<timestamp>.xlsx.
        """
        datetime_str = datetime.now().strftime("%Y%m%d-%H%M%S")
        output_file = Path(self.params['output_path']) / f"prf_merge-{datetime_str}.xlsx"
        input_path = self.params['input_path']
        file_paths = [Path(input_path) / file for file in self.data_map.values()]
        self.log_updated.emit("INFO", f"Merging Excel files vertically from {file_paths} to {output_file}...")

        dfs = []
        for file in file_paths:
            df = pd.read_excel(file)
            # First six characters of the stem encode the week (e.g. "202516").
            df.insert(0, 'Week', file.stem[:6])
            dfs.append(df)

        # Outer join keeps the union of all columns across files.
        combined_df = pd.concat(dfs, axis=0, ignore_index=True, join='outer')
        combined_df = combined_df.fillna(0).reset_index(drop=True)
        combined_df.to_excel(output_file, index=False)

    @staticmethod
    def parse_week(week_str, week_format='%Y%W'):
        """Return the Monday of the given ISO week as a datetime.

        Args:
            week_str: week label in YYYYWW format, e.g. "202519".
            week_format: kept for backward compatibility; unused.
        """
        year = int(week_str[:4])
        week = int(week_str[4:])
        # Day 1 = Monday, the ISO start of the week.
        return datetime.fromisocalendar(year, week, 1)

    @staticmethod
    def process_sheet(df, base_version, compare_version):
        """Order month columns as (base, compare, delta) groups and add totals.

        Adds Total_<base>, Total_<compare> and Total_delta columns plus a
        bottom summary row marked with product_code == 'Total'.
        """
        # Work on a copy to avoid SettingWithCopyWarning on the caller's frame.
        df = df.copy()

        cols = df.columns.tolist()
        product_code = ['odm_desc', 'ssd_family_name', 'mm_number', 'product_code', 'ssd_name']
        month_cols = [col for col in cols if col not in product_code]

        # Split each data column into (month, version-type, original name).
        parsed = []
        for col in month_cols:
            if '_delta' in col:
                month = col.replace('_delta', '')
                type_ = 'delta'
            else:
                month, type_ = col.rsplit('_', 1)
            parsed.append((month, type_, col))

        # Chronological month order; within a month: base, compare, delta.
        months = sorted(set([p[0] for p in parsed]), key=lambda x: pd.to_datetime(x))
        ordered_cols = []
        for month in months:
            ordered_cols.extend([p[2] for p in parsed if p[0] == month and p[1] == base_version])
            ordered_cols.extend([p[2] for p in parsed if p[0] == month and p[1] == compare_version])
            ordered_cols.extend([p[2] for p in parsed if p[0] == month and p[1] == 'delta'])

        df = df[product_code + ordered_cols]

        # Horizontal totals per version.
        base_cols = [col for col in df.columns if col.endswith(f'_{base_version}')]
        compare_cols = [col for col in df.columns if col.endswith(f'_{compare_version}')]
        delta_cols = [col for col in df.columns if col.endswith('_delta')]

        # .loc assignment avoids SettingWithCopyWarning.
        df.loc[:, f'Total_{base_version}'] = df[base_cols].sum(axis=1)
        df.loc[:, f'Total_{compare_version}'] = df[compare_cols].sum(axis=1)
        df.loc[:, 'Total_delta'] = df[delta_cols].sum(axis=1)

        # Vertical summary row over all numeric columns.
        sum_row = df.select_dtypes(include='number').sum().to_frame().T
        sum_row['product_code'] = 'Total'
        df = pd.concat([df, sum_row], ignore_index=True)

        return df

    @staticmethod
    def merge_metadata(df_source, df_target):
        """Copy auxiliary metadata columns from a source frame into a target frame.

        Args:
            df_source: frame holding the metadata (must contain odm_desc,
                product_code and whichever AUXILIARY_COLUMNS are available).
            df_target: frame to enrich (must contain odm_desc, product_code).

        Returns:
            df_target with the metadata columns left-joined in; rows without a
            match keep NaN in the metadata columns.
        """
        # Only merge the auxiliary columns the source actually has.
        metadata_cols = [col for col in AUXILIARY_COLUMNS if col in df_source.columns]

        # One metadata row per key; drop_duplicates guards against fan-out.
        source_metadata = df_source[CRITICAL_COLUMNS + metadata_cols].drop_duplicates()

        merged = pd.merge(
            df_target,
            source_metadata,
            on=CRITICAL_COLUMNS,
            how='left'  # keep every target row; unmatched metadata stays NaN
        )

        return merged

    def process_version(self, df, version_name):
        """Aggregate one version's weekly quantities to (odm, product, month).

        Week columns are detected as all-digit column names, melted to long
        form, mapped to the calendar month of each week's Monday, and summed.
        """
        week_columns = [col for col in df.columns if col.isdigit()]
        melted = df.melt(
            id_vars=['odm_desc', 'product_code'],
            value_vars=week_columns,
            var_name='week',
            value_name=version_name
        )

        # Label each week with the month of its Monday.
        melted['month'] = melted['week'].apply(
            lambda x: self.parse_week(x).strftime('%Y-%m')
        )

        grouped = melted.groupby(['odm_desc', 'product_code', 'month'])[version_name].sum().reset_index()
        return grouped

    def process_monthly_report(self, base_data_path, compare_data_path, base_week, compare_week, output_path):
        """Build the month-level comparison workbook, one sheet per ODM.

        Args:
            base_data_path: spreadsheet of the baseline version.
            compare_data_path: spreadsheet of the comparison version.
            base_week: baseline week string (YYYYWW); names the base version.
            compare_week: comparison week string (YYYYWW); names the compare version.
            output_path: destination .xlsx file.
        """
        base_version = f"V{base_week[-2:]}"
        compare_version = f"V{compare_week[-2:]}"
        # Report window: the month after compare_week through December of that
        # year. Roll the year over instead of emitting an invalid "YYYY-13"
        # label; a December compare week therefore yields an empty window.
        year = int(compare_week[:4])
        month = self.parse_week(compare_week).month + 1
        if month > 12:
            year += 1
            month = 1
        start_month = f"{year}-{month:02d}"
        end_month = f"{compare_week[:4]}-12"
        base_data = pd.read_excel(base_data_path)
        compare_data = pd.read_excel(compare_data_path)
        base = self.process_version(base_data.copy(), base_version)
        compare = self.process_version(compare_data.copy(), compare_version)

        # Outer-join the two versions so months present in only one survive.
        merged = pd.merge(
            base,
            compare,
            on=['odm_desc', 'product_code', 'month'],
            how='outer'
        )

        # Restrict to the reporting window (string compare works on YYYY-MM).
        merged = merged[(merged['month'] >= start_month) & (merged['month'] <= end_month)]

        # Month-level change; missing quantities count as 0.
        merged['delta'] = merged[compare_version].fillna(0) - merged[base_version].fillna(0)

        # Wide format: one column group per month.
        final = merged.pivot_table(
            index=CRITICAL_COLUMNS,
            columns='month',
            values=[base_version, compare_version, 'delta'],
            aggfunc='first'
        )

        # Flatten the MultiIndex header into "<month>_<version>" names.
        final.columns = [f'{month}_{col}' for col, month in final.columns]
        final = final.reset_index()
        final = self.merge_metadata(base_data, final)
        # One sheet per ODM.
        with pd.ExcelWriter(output_path, engine="xlsxwriter") as writer:
            for odm in final['odm_desc'].unique():
                odm_df = final[final['odm_desc'] == odm].copy()
                odm_df = self.process_sheet(odm_df, base_version, compare_version)

                # Excel caps sheet names at 31 characters.
                odm_df.to_excel(writer, sheet_name=odm[:31], index=False)

                workbook = writer.book
                worksheet = writer.sheets[odm[:31]]

                # Shared number format: negatives red and parenthesised.
                red_format = workbook.add_format({"num_format": "#,##0;[Red](#,##0);0"})
                yellow_format = workbook.add_format({
                    "num_format": "#,##0;[Red](#,##0);0",  # same numeric format as red_format
                    "bg_color": "#FFFF00"  # yellow background highlights delta columns
                })

                # Yellow background on every delta column.
                for col_num, col_name in enumerate(odm_df.columns):
                    if col_name.endswith('_delta'):
                        worksheet.set_column(col_num, col_num, None, yellow_format)

                # Conditional red font for negative values in every numeric column.
                for col_num, col_name in enumerate(odm_df.columns):
                    if col_name not in CRITICAL_COLUMNS + AUXILIARY_COLUMNS:
                        worksheet.conditional_format(
                            1, col_num, len(odm_df), col_num,  # data rows only (row 0 is the header)
                            {
                                'type': 'cell',
                                'criteria': '<',
                                'value': 0,
                                'format': red_format
                            }
                        )
                        # Formula variant so non-numeric cells are ignored. Uses
                        # real A1 letters: chr(65 + n) breaks past column Z.
                        col_letter = self._excel_col_name(col_num)
                        worksheet.conditional_format(
                            1, col_num, len(odm_df), col_num,
                            {
                                'type': 'formula',
                                'criteria': f'=AND(ISNUMBER({col_letter}2), {col_letter}2<0)',
                                'format': red_format
                            }
                        )


if __name__ == '__main__':
    # Standalone debug entry point: create a (headless) Qt application context
    # so the QThread signal/slot machinery works outside the real GUI.
    app = QApplication([])
    dm = {202516: '202516_2_PRF.xlsx',
          202517: '202517_2_PRF.xlsx',
          202519: '202519.2 PRF.xlsx',
          202520: '202520.2 PRF.xlsx'}
    para = {'input_path': 'E:/work/getProject/2025/05/0518/PRF_process_tool/data',
            'output_path': 'E:/work/getProject/2025/05/0518/PRF_process_tool/output',
            'mode': 0,
            'base_week': 202516,
            'comparison_week': 202519,
            }

    # Build the worker and wire its signals to console printers for debugging.
    thread = ProcessThread(dm, para)


    def handle_finished(code, msg):
        """Print the worker's result and leave the event loop."""
        print(f"\n[DEBUG] 执行结果: {code}, {msg}")
        app.quit()  # exit the event loop once the worker is done


    thread.finished.connect(handle_finished)
    thread.progress_updated.connect(lambda x: print(f"\n[DEBUG] 进度更新: {x}"))
    thread.log_updated.connect(lambda x, y: print(f"\n[DEBUG] 日志更新: {x}, {y}"))

    print("=== 开始调试执行 ===")
    thread.start()

    # Block in the Qt event loop until handle_finished calls app.quit().
    app.exec()

    print("=== 调试执行结束 ===")
