import copy
import json
import os
import sys
import math
import re
from collections import defaultdict
from datetime import datetime
import warnings
from typing import Dict, List

import numpy as np
import pandas as pd
from pandas import ExcelWriter
from openpyxl.styles import Font, Border, Side, Alignment
from openpyxl.utils import get_column_letter

warnings.filterwarnings("ignore")

from calculator.comm_vars import PCB_LTIM_LIST, REPLACEMENT_GROUP, REPLACEMENT_PARTS
from calculator.process_consigned_data import process_cs_bom_data, process_consigned_pti_ssdd, \
    process_consigned_pega_ssdd, calculate_material_ratios

# IPNs present in inventory data but absent from the processed SSDD data;
# rewritten by process_pti_ssdd_data / process_pega_ssdd_data on each run.
missing_ipns = []
# Module-level lookup caches. UP_DICT / CATEGORY_DICT are filled by the
# inventory readers below; the others are populated elsewhere in the project.
OPO_DICT = {}
PCB_DICT = {}
SLT_DICT = {}
UP_DICT = {}        # IPN -> unit price
CATEGORY_DICT = {}  # IPN -> commodity / category


# Suffix appended to Scodes that represent upside (extra) demand rows.
UPSIDE = "(Upside)"
# Special-case identifiers — presumably consumed by process_spacial_data
# (defined elsewhere); NOTE(review): confirm their role there.
SPACIAL_ALT_ID = 313
SPACIAL_IPN = 'G78082-001'
# Manual data-correction workbook, resolved relative to this source file.
correction_data_path = '../data/DataChange.xlsx'
correction_data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), correction_data_path))
# Cache workbook holding the DCR data from the previous run.
last_dcr_data_path = "../data/last_dcr_data.xlsx"
last_dcr_data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), last_dcr_data_path))

# Visual separator for console output.
Separator = "#" * 40


def apply_ipn_corrections(grouped_df, correction_df, correction_column_name):
    """Apply manual part-number corrections to one column of a frame.

    correction_df maps 'Current' values to 'Change to' values (last entry
    wins on duplicates). Unmapped values are kept as-is. Rows whose
    corrected value is the sentinel 'Delete' are removed from the result.
    """
    print(f"correction_column_name:{correction_column_name}")
    if not correction_df.empty:
        change_map = (
            correction_df.drop_duplicates('Current', keep='last')
            .set_index('Current')['Change to']
            .to_dict()
        )
        column = grouped_df[correction_column_name]
        grouped_df[correction_column_name] = (
            column.astype(str).map(change_map).fillna(column)
        )
    return grouped_df[grouped_df[correction_column_name] != 'Delete']


def read_excel_with_dynamic_header(file_path, sheet_name=None):
    """Read an Excel sheet whose header row is not necessarily row 0.

    The first row whose first column holds a non-blank, non-NaN value is
    taken as the header; everything below it becomes the data. Returns
    None when the first column is entirely empty.
    """
    raw = pd.read_excel(file_path, sheet_name=sheet_name, header=None)

    # First row where column 0 is neither NaN nor an empty/whitespace string
    non_empty = raw[0].apply(
        lambda v: v if pd.notna(v) and str(v).strip() != '' else None
    )
    header_row = non_empty.first_valid_index()

    if header_row is None:
        print("第一列没有找到非空单元格")
        return None

    header = raw.iloc[header_row].tolist()
    body = raw.iloc[header_row + 1:].values
    return pd.DataFrame(body, columns=header)


def update_ipn(df1, df2):
    """Fill df1's IPN from df2 (matched on Materials), falling back to
    the 'IPN (Customer PN)' column where df2 has no match."""
    result = df1.merge(
        df2[['Materials', 'IPN']],
        how='left',
        on='Materials',
        suffixes=('_original', ''),
    )
    result['IPN'] = result['IPN'].fillna(result['IPN (Customer PN)'])
    return result


def process_columns(df):
    """Reshape a PTI SSDD frame into IPN / BOH / weekly-quantity columns.

    Keeps only REQUIRE and PR/PO rows, renames weekly columns to
    str(YYYYWW + 1) labels, folds the 'Exp for Smart DOI' quantity into
    the fourth week column, then hands off to process_require_data to
    pre-consume stock against first-week requirements.
    """
    # 1. Filter the target row types (Type column kept for later masking)
    filtered_df = df[df['Type(V)'].isin(['1.REQUIRE', '68.PR/PO_QTY', '681.PR/PO_QTY_EXTRA'])].copy()
    Exp_column_name = 'Exp for Smart DOI'
    # 2. Identify weekly data columns by their 'YYYYWW/YYYYMMDD~YYYYMMDD' names
    week_columns = [
        col for col in df.columns
        if re.match(r"^\d{6}/\d{8}~\d{8}$", col)
    ]
    if 'Exp for Smart DOI' not in filtered_df.columns:
        filtered_df[Exp_column_name] = 0

    # 3. (historical) zeroing of PR/PO weekly data, kept for reference
    # mask = filtered_df['Type(V)'].isin(['68.PR/PO_QTY', '681.PR/PO_QTY_EXTRA'])
    # stock_OH_mask = filtered_df['Type(V)'] == '2.RECEIPT'
    consume_mask = filtered_df['Type(V)'] == '1.REQUIRE'
    # filtered_df.loc[mask, week_columns] = 0  # zero the whole row of weekly data
    # filtered_df.loc[stock_OH_mask, ["Stock OH Last Dock Date", Exp_column_name]] = 0

    # 4. Select the columns to keep (Type column dropped further below)
    selected_columns = ['Alt ID', 'Type(V)', "Stock OH Last Dock Date"] + week_columns + ["IPN"] + [Exp_column_name]
    filtered_df = filtered_df[selected_columns]

    # REQUIRE rows carry demand, not stock: zero their BOH and Exp values
    filtered_df.loc[consume_mask, ["Stock OH Last Dock Date", Exp_column_name]] = 0

    # 5. Rename: dock-date column becomes BOH; week labels become str(YYYYWW + 1)
    filtered_df = filtered_df.rename(columns={"Stock OH Last Dock Date": "BOH"})
    rename_dict = {col: str(int(col.split("/")[0]) + 1) for col in week_columns}
    filtered_df = filtered_df.rename(columns=rename_dict)
    # print(filtered_df.columns)

    # Fold 'Exp for Smart DOI' into the fourth week column.
    # NOTE(review): index 6 assumes layout [Alt ID, Type(V), BOH, w1, w2, w3, w4, ...] — confirm.
    forth_week = filtered_df.columns[6]
    filtered_df[forth_week] = filtered_df[forth_week] + filtered_df[Exp_column_name]
    # return filtered_df
    filtered_df = filtered_df.drop(columns=[Exp_column_name])
    # 6. Reorder so IPN and BOH come first
    ordered_columns = ["IPN", "BOH"] + [col for col in filtered_df.columns if col not in ["IPN", "BOH"]]
    # columns[3] is the first week label (pre-reorder layout: Alt ID, Type(V), BOH, weeks...)
    first_week = filtered_df.columns[3]

    # 7. Pre-production logic: spread first-week requirements over group stock
    filtered_df = process_require_data(filtered_df[ordered_columns], first_week)

    return filtered_df


def process_require_data(df, first_week, odm_type='PTI'):
    """Pre-consume stock against first-week requirements, per alt group.

    Within each Alt ID (PTI) / Group ID (PEGA) group, the requirement
    row's first-week quantity is spread across the group's rows in
    proportion to their BOH, reducing BOH accordingly. Requirement rows
    then have their weekly columns zeroed, and all working columns
    (including the group id and Type(V)) are dropped from the result.

    Args:
        df: frame with BOH, Type(V), group-id and weekly columns
        first_week: label of the first weekly column
        odm_type: 'PTI' (default) or 'PEGA'; selects column/row labels
    """
    # Per-ODM labels for the grouping column and the requirement row type
    group_id = 'Alt ID'
    require = '1.REQUIRE'
    if odm_type == 'PEGA':
        group_id = 'Group ID'
        require = 'Demand'

    df = df.copy()
    # Normalize column names (guards against stray whitespace)
    df.columns = df.columns.str.strip()

    # Keep the pre-consumption stock value around for reference
    df['Original_BOH'] = df['BOH']

    # Process group by group
    result_dfs = []
    for alt_id, group in df.groupby(group_id):
        # Group consumption = first-week value of the (first) requirement row
        require_rows = group[group['Type(V)'] == require]
        if len(require_rows) > 0:
            total_consumption = require_rows.iloc[0][first_week]
        else:
            total_consumption = 0

        # Total stock held by the group
        total_boh = group['BOH'].sum()

        # Allocate only when there is both stock and demand
        if total_boh > 0 and total_consumption > 0:
            # Per-row share of the group's stock, and its assigned consumption
            group['Consumption_Ratio'] = group['BOH'] / total_boh
            group['Assigned_Consumption'] = group['Consumption_Ratio'] * total_consumption

            # Reduce stock by the assigned consumption
            group['BOH'] = group['BOH'] - group['Assigned_Consumption']

            # Mark the group as processed
            group['Processed'] = True
        else:
            group['Assigned_Consumption'] = 0
            group['Processed'] = False

        result_dfs.append(group)

    # Recombine all groups
    result_df = pd.concat(result_dfs).reset_index(drop=True)
    consume_mask = result_df['Type(V)'] == require
    other_columns = [col for col in result_df.columns if
                     col not in ['IPN', 'BOH', group_id, 'Type(V)', 'Consumption_Ratio', 'Processed', 'Original_BOH',
                                 'Assigned_Consumption']]
    print(other_columns)
    # Requirement rows have been consumed above: zero their weekly columns
    result_df.loc[consume_mask, other_columns] = 0
    # Drop working columns before returning
    result_df = result_df.drop(
        columns=[group_id, 'Type(V)', 'Consumption_Ratio', 'Processed', 'Original_BOH', 'Assigned_Consumption', ])

    return result_df


def remove_nan_string_rows(df):
    """Drop rows whose IPN column holds the string 'NaN' (any casing).

    Real NaN values also become the string 'nan' via astype(str) and are
    therefore dropped as well, matching the original behavior.

    Fixes: the original mutated the caller's frame in place
    (``df['IPN'] = df['IPN'].astype(str)``) and listed 'nan' twice in the
    isin() filter. The input frame is now left untouched.

    Args:
        df: DataFrame containing an 'IPN' column

    Returns:
        A new, cleaned DataFrame whose IPN column is coerced to str.
    """
    # Work on a copy so the caller's frame is never modified
    df = df.copy()
    df['IPN'] = df['IPN'].astype(str)

    # Case-insensitive match, tolerating surrounding whitespace
    keep = df['IPN'].str.strip().str.lower() != 'nan'

    return df[keep].copy()


def process_pti_ssdd_data(data_path, inventory_path):
    """Load and normalize the PTI SSDD workbook.

    Applies manual IPN corrections, reshapes to IPN/BOH/weekly columns,
    groups by IPN, and optionally folds in inventory-only IPNs.

    Fixes: the original printed ``pti_ssdd_data.columns`` BEFORE the
    None check, raising AttributeError whenever the dynamic-header read
    found no header row; the guard now runs first.

    Args:
        data_path: path to the SSDD workbook (sheet 'SSDD')
        inventory_path: optional path to the PTI inventory workbook

    Returns:
        (processed DataFrame, IPN -> lead-time dict), or None when the
        SSDD sheet is missing/empty.
        NOTE(review): the bare None return is inconsistent with callers
        that unpack two values — confirm intended handling.

    Side effects:
        Rewrites the module-level ``missing_ipns`` list.
    """
    # Manual correction mapping: columns A/B of the PTI_SSDD sheet
    correction_df = pd.read_excel(correction_data_path,
                                  sheet_name='PTI_SSDD',
                                  usecols=[0, 1])
    pti_ssdd_data = read_excel_with_dynamic_header(data_path, "SSDD")

    # Guard before any attribute access: the dynamic-header reader
    # returns None when the first column is entirely empty.
    if pti_ssdd_data is None or pti_ssdd_data.empty:
        return None
    print("pti_ssdd_data:", pti_ssdd_data.columns)

    pti_ssdd_data = pti_ssdd_data.rename(columns={"IPN (Customer PN)": "IPN"})
    corrected_df = apply_ipn_corrections(pti_ssdd_data.copy(), correction_df, 'IPN')

    lead_time_dict = create_unique_dict(corrected_df, 'IPN', 'LeadTime')
    corrected_df = process_columns(corrected_df)

    # Group by IPN and sum the weekly columns
    corrected_df = corrected_df.groupby('IPN', as_index=False).sum()
    if inventory_path and os.path.exists(inventory_path):
        print("add inventory data")
        inventory_df = read_pti_inventory_data(inventory_path)
        # IPNs present in the inventory but absent from the SSDD data
        global missing_ipns
        missing_ipns = inventory_df[~inventory_df['IPN'].isin(corrected_df['IPN'])]['IPN'].tolist()
        corrected_df = merge_inventory_data(corrected_df, inventory_df)
    print(corrected_df.iloc[:, 0:4])
    return corrected_df, lead_time_dict


def merge_inventory_data(corrected_df, inventory_df):
    """Append inventory-only IPNs to the processed frame.

    IPNs found in inventory_df but not in corrected_df are appended as
    new rows carrying their BOH, with every other column zeroed.
    """
    known_ipns = corrected_df['IPN']
    extras = inventory_df[~inventory_df['IPN'].isin(known_ipns)]

    # New rows start with just IPN and BOH ...
    appended = pd.DataFrame({
        'IPN': extras['IPN'],
        'BOH': extras['BOH']
    })
    # ... and get a zero in every remaining column of the target frame
    for column in corrected_df.columns:
        if column in ('IPN', 'BOH'):
            continue
        appended[column] = 0

    return pd.concat([corrected_df, appended], ignore_index=True)


def process_pega_ssdd_data(data_path, inventory_path):
    """Load and normalize the PEGA SSDD workbook.

    Keeps Demand / PO Commit rows, renames weekly date columns to
    str(ISO YYYYWW + 1) labels, applies manual IPN corrections,
    pre-consumes stock via process_require_data and optionally folds in
    inventory-only IPNs.

    Returns:
        (processed DataFrame, IPN -> supplier-lead-time dict)

    Side effects:
        Rewrites the module-level ``missing_ipns`` list.

    NOTE(review): corrections are applied twice (before filtering and
    after renaming) and the row filter runs on the UNcorrected frame —
    confirm both are intentional.
    """
    # Read the workbook
    pega_ssdd_data = pd.read_excel(data_path)
    correction_df = pd.read_excel(correction_data_path,
                                  sheet_name='PEGA_SSDD',
                                  usecols=[0, 1])  # read columns A and B (indexes 0 and 1)
    # Keep only demand / PO-commit row types

    corrected_df = apply_ipn_corrections(pega_ssdd_data.copy(), correction_df, 'IPN')
    lead_time_dict = create_unique_dict(corrected_df, 'IPN', 'Supplier Lead Time (Days)')
    filtered_data = pega_ssdd_data[pega_ssdd_data['Type(V)'].isin(['Demand (HVM PO/PRF)',
                                                                   'Demand (NPI PO/PRF)',
                                                                   'PO Commit'])].copy()

    # Extract Group ID / IPN / Type(V) plus the weekly columns (positions 24..127)
    columns_to_extract = ['Group ID', 'IPN', 'Type(V)'] + filtered_data.columns[24:128].tolist()
    filtered_df = filtered_data[columns_to_extract]
    # Collapse the two demand variants into a single 'Demand' type
    mask = filtered_df['Type(V)'].isin(['Demand (HVM PO/PRF)', 'Demand (NPI PO/PRF)'])
    filtered_df.loc[mask, 'Type(V)'] = 'Demand'
    # return filtered_df
    filtered_df = process_spacial_data(filtered_df)
    # (historical) group by IPN and sum — kept for reference
    # grouped_df = filtered_df.groupby('IPN', as_index=False).sum()
    grouped_df = filtered_df

    # Everything after the first four columns is a weekly date column
    date_columns = grouped_df.columns[4:]

    # Parse the weekly headers as datetimes
    date_columns = pd.to_datetime(date_columns, errors='coerce')

    # Format as ISO year + week number (YYYYWW)
    formatted_date_columns = date_columns.map(
        lambda x: f"{x.isocalendar().year}{x.isocalendar().week:02d}" if pd.notnull(x) else x)

    # Rebuild the header: fixed columns + shifted (week + 1) labels
    rename_list = [str(int(col) + 1) for col in formatted_date_columns]
    new_columns = ['Group ID', 'IPN', 'Type(V)', 'BOH'] + rename_list
    grouped_df.columns = new_columns
    corrected_df = apply_ipn_corrections(grouped_df.copy(), correction_df, 'IPN')
    # corrected_df = corrected_df.groupby('IPN', as_index=False).sum()
    print(corrected_df.iloc[:, 0:4])
    # Real NaN IPNs become the string 'NaN' so grouping keeps them; they
    # are stripped again by remove_nan_string_rows below.
    df_filled = corrected_df.fillna({'IPN': 'NaN'})
    corrected_df = df_filled.groupby(['Group ID', 'Type(V)', 'IPN'], as_index=False).sum()
    first_week = corrected_df.columns[4]
    print(first_week)
    corrected_df = process_require_data(corrected_df, first_week, 'PEGA')
    corrected_df = remove_nan_string_rows(corrected_df)
    if inventory_path and os.path.exists(inventory_path):
        inventory_df = read_pega_inventory_data(inventory_path)
        global missing_ipns
        missing_ipns = inventory_df[~inventory_df['IPN'].isin(corrected_df['IPN'])]['IPN'].tolist()
        corrected_df = merge_inventory_data(corrected_df, inventory_df)
    # Return the grouped-and-summed data plus the lead-time lookup
    return corrected_df, lead_time_dict


def read_pti_inventory_data(inventory_file_path):
    """Read the first sheet whose name contains "Aging & Expired".

    Returns per-IPN BOH totals. As a side effect the module-level
    UP_DICT (IPN -> column-28 value, presumably unit price — confirm)
    and CATEGORY_DICT (IPN -> Commodity) caches are rebuilt.

    Raises:
        ValueError: when no sheet name contains "Aging & Expired".
    """
    workbook = pd.ExcelFile(inventory_file_path)
    matching = [name for name in workbook.sheet_names if "Aging & Expired" in name]
    if not matching:
        raise ValueError("未找到包含'Aging & Expired'的sheet")

    # Use the first matching sheet
    sheet_df = pd.read_excel(inventory_file_path, sheet_name=matching[0])

    global UP_DICT, CATEGORY_DICT
    # Column index 28 holds the unit-price figure in this report layout
    UP_DICT = dict(zip(sheet_df["IPN"], sheet_df.iloc[:, 28]))
    CATEGORY_DICT = dict(zip(sheet_df["IPN"], sheet_df["Commodity"]))

    stock = sheet_df[["IPN", "OH stock(Expired included)"]].rename(
        columns={"OH stock(Expired included)": "BOH"})
    return stock.groupby('IPN', as_index=False)["BOH"].sum()


def read_pega_inventory_data(inventory_file_path):
    """Read a PEGA inventory workbook (.xlsx or .xlsb).

    Returns per-IPN BOH totals (from the 'Total Stock' column). As a
    side effect the module-level UP_DICT (IPN -> U/P) and CATEGORY_DICT
    (IPN -> Commodity) caches are rebuilt.

    Args:
        inventory_file_path (str): path to the workbook

    Returns:
        pd.DataFrame: columns IPN and BOH

    Raises:
        ValueError: for any extension other than .xlsx / .xlsb
    """
    file_extension = os.path.splitext(inventory_file_path)[1].lower()

    # .xlsb needs the pyxlsb engine; .xlsx uses the default reader
    if file_extension == '.xlsx':
        raw = pd.read_excel(inventory_file_path)
    elif file_extension == '.xlsb':
        raw = pd.read_excel(inventory_file_path, engine='pyxlsb')
    else:
        raise ValueError(f"不支持的文件格式: {file_extension}，仅支持.xlsx和.xlsb格式")

    global UP_DICT, CATEGORY_DICT
    UP_DICT = dict(zip(raw["IPN"], raw["U/P"]))
    CATEGORY_DICT = dict(zip(raw["IPN"], raw["Commodity"]))

    stock = raw[["IPN", "Total Stock"]].rename(columns={"Total Stock": "BOH"})
    return stock.groupby('IPN', as_index=False)["BOH"].sum()


def process_prf_data(data_path, dcr_data_path, data_type, upside_data=None):
    """Build the production (PRF) plan frame: Scode + weekly quantities.

    Optionally overlays DCR changes (read_dcr_data / update_prf_from_dcr)
    and appends upside rows via validate_and_insert_data. The first data
    column is zeroed.

    NOTE(review): ``upside_data['Scode']`` is mutated in place (suffixes
    appended) — confirm callers do not reuse the dict afterwards.
    """
    # Read the PRF workbook
    pega_prf_data = pd.read_excel(data_path)
    # Replace all NaN with 0
    pega_prf_data.fillna(0, inplace=True)
    # Keep the mm_number column plus everything after column F
    columns_to_extract = ['mm_number'] + pega_prf_data.columns[5:].tolist()
    filtered_df = pega_prf_data[columns_to_extract]
    filtered_df.columns = ['Scode'] + list(filtered_df.columns[1:])
    prf_data = filtered_df
    if dcr_data_path and os.path.exists(dcr_data_path):
        dcr_data = read_dcr_data(dcr_data_path, data_type)
        prf_data = update_prf_from_dcr(filtered_df, dcr_data)
    # Zero the first data column.
    # NOTE(review): rationale is not evident here — confirm why week 1 is cleared.
    prf_data[prf_data.columns[1]] = 0
    print(prf_data.iloc[:, 0:3])
    if upside_data:
        # Tag upside Scodes with the "(Upside)" suffix and insert the rows
        upside_scode_list = [f"{scode}{UPSIDE}" for scode in upside_data['Scode']]
        upside_data['Scode'] = upside_scode_list
        input_data = pd.DataFrame(upside_data)
        prf_data = validate_and_insert_data(prf_data, input_data)
    return prf_data


def read_dcr_data(data_path, data_type):
    """
    Read the DCR sheet, extract the 'MM#', 'YY/WW' and 'To' columns,
    drop rows with an empty 'MM#', merge with the cached DCR data from
    the previous run (newest rows win on the MM# + YY/WW key) and
    persist the merged result back to the cache workbook.

    Fixes: the cache read used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; it now catches Exception and logs.

    Args:
        data_path (str): path to the DCR Excel file
        data_type (str): ODM identifier ('PTI' or other); selects the
            source sheet name and the cache sheet name

    Returns:
        pd.DataFrame: merged frame with the three required columns

    Raises:
        ValueError: when a required column is missing from the sheet
    """
    # Load the cached DCR data from the previous run; fall back to an
    # empty frame when the cache file/sheet is not readable yet.
    try:
        last_dcr_data = pd.read_excel(last_dcr_data_path, sheet_name=f"{data_type}")
    except Exception as exc:
        print(f"No cached DCR data for {data_type}: {exc}")
        last_dcr_data = pd.DataFrame(columns=['MM#', 'YY/WW', 'To'])

    now_year = datetime.now().year
    # PTI uses a fixed sheet name; other ODMs name the sheet per year
    sheet_name = "DCR" if data_type == "PTI" else f"{now_year} DCR"
    df = read_excel_with_dynamic_header(data_path, sheet_name)

    # Validate that all required columns are present
    required_columns = ['MM#', 'YY/WW', 'To']
    missing_cols = [col for col in required_columns if col not in df.columns]

    if missing_cols:
        raise ValueError(f"Excel文件中缺少必要的列: {missing_cols}")

    selected_df = df[required_columns].copy()

    # Drop rows with an empty 'MM#'
    selected_df = selected_df[selected_df['MM#'].notna()]

    # Merge cache + new data; on duplicate (MM#, YY/WW) keys the new rows,
    # appended last, take precedence.
    if not last_dcr_data.empty:
        merge_key = ['MM#', 'YY/WW']
        combined_df = pd.concat([last_dcr_data[required_columns], selected_df[required_columns]], ignore_index=True)
        merged_df = combined_df.drop_duplicates(subset=merge_key, keep='last')
    else:
        merged_df = selected_df[required_columns].copy()

    merged_df.reset_index(drop=True, inplace=True)
    # Persist the merged result back into the cache workbook
    with ExcelWriter(last_dcr_data_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
        merged_df.to_excel(writer, sheet_name=f"{data_type}", index=False)
    return merged_df


def update_prf_from_dcr(prf_data, dcr_data):
    """Overlay DCR 'To' quantities onto the wide PRF plan.

    The plan is melted to long form, joined against dcr_data on
    (Scode, week) == (MM#, YY/WW), the 'To' value overrides the planned
    quantity wherever present, and the frame is pivoted back to the
    original wide layout and column order.
    """
    # Week labels must be plain strings (YYYYWW) on both sides of the join
    prf_data.columns = [prf_data.columns[0]] + [str(c) for c in prf_data.columns[1:]]
    dcr_data['YY/WW'] = dcr_data['YY/WW'].astype(str)

    # Wide plan -> long (Scode, Week, Plan) rows
    long_plan = prf_data.melt(id_vars='Scode', var_name='Week', value_name='Plan')

    # Attach the matching DCR rows, if any
    joined = long_plan.merge(
        dcr_data,
        how='left',
        left_on=['Scode', 'Week'],
        right_on=['MM#', 'YY/WW'],
    )

    # 'To' wins where present; otherwise keep the planned value
    joined['Updated_Plan'] = joined['To'].combine_first(joined['Plan'])

    # Long -> wide again
    wide = joined.pivot_table(
        index='Scode',
        columns='Week',
        values='Updated_Plan',
    ).reset_index().rename_axis(columns=None)

    # Restore the original column order (Scode first, then weeks)
    return wide[prf_data.columns]


def process_bom_data(data_path, data_type, upside_data=None):
    """Load the turnkey BOM for one ODM and apply manual SPN corrections.

    Keeps only turnkey lines (Is Turnkey == 1) with a quantity, drops
    duplicates, and renames the columns to the Scode/SPN layout used by
    calculate_weekly_consumption. When upside_data is supplied, BOM rows
    are duplicated for the upside Scodes via duplicate_bom_with_upside.
    """
    # The correction sheet is named per data type, e.g. 'PTI_BOM'
    correction_sheet_name = f"{data_type}_BOM"
    bom_data = pd.read_excel(data_path)
    correction_df = pd.read_excel(correction_data_path,
                                  sheet_name=correction_sheet_name,
                                  usecols=[0, 1])  # read columns A and B (indexes 0 and 1)
    print(correction_df)
    # Keep identification, quantity and replacement-handling columns
    columns_to_extract = ['MM#', 'Number', 'Quantity', 'Replacement Status', 'Replacement Type', 'Is Turnkey']
    filtered_df = bom_data[columns_to_extract]
    filtered_df = filtered_df[
        (filtered_df['Quantity'].notna())
        # & (filtered_df['Quantity'] != 0)
        & (filtered_df['Is Turnkey'] == 1)
        ]
    filtered_df.drop(columns=['Is Turnkey'], inplace=True)
    # filtered_df = filtered_df.groupby(['MM#', 'Number'], as_index=False).sum()
    filtered_df = filtered_df.drop_duplicates()
    # Third column ('所需物料数量') = required material quantity per unit
    filtered_df.columns = ['Scode', 'SPN', '所需物料数量', 'Replacement Status', 'Replacement Type']
    # print(filtered_df.iloc[:, 0:4])
    print(filtered_df.head())
    filtered_df = apply_ipn_corrections(filtered_df.copy(), correction_df, 'SPN')
    if upside_data:
        product_codes = upside_data.get('Scode', [])
        filtered_df = duplicate_bom_with_upside(product_codes, filtered_df)
    return filtered_df


def calculate_weekly_consumption(bom_df, stock_df, prf_df, output_path, end_week=None):
    """Simulate week-by-week material consumption and write one sheet per week.

    For each selected week: add the week's receipts to carried-over
    stock, walk every planned product's BOM (replacement groups split
    demand in proportion to stock coverage), subtract scrap-inflated
    consumption (stock may go negative) and emit a per-SPN sheet into
    output_path. Week labels are 6-digit numeric column names in prf_df.
    """
    # Scrap ratio depends on whether this is a consigned ('cs') run.
    # NOTE(review): the original comments said 0.03% / 0.3% but the
    # values are 0.3% / 3% — confirm which is intended.
    scrap_ratio = 0.003
    if 'cs' in output_path:
        scrap_ratio = 0.03
    # Drop BOM lines whose material is unknown to the stock frame
    valid_materials = set(stock_df['IPN'])
    bom_df = bom_df[bom_df['SPN'].isin(valid_materials)].copy()

    # Opening stock (deep copies keep weekly snapshots independent)
    current_stock = stock_df.set_index('IPN')['BOH'].fillna(0).to_dict()

    # Determine the week range
    all_weeks = sorted([col for col in prf_df.columns if col.isdigit() and len(col) == 6])
    print(all_weeks)
    if end_week:
        selected_weeks = all_weeks[:all_weeks.index(end_week) + 1]
    else:
        selected_weeks = all_weeks

    with pd.ExcelWriter(output_path) as writer:
        for week in selected_weeks:
            print(f"Processing week: {week}")
            # Start from last week's stock, then apply this week's receipts
            week_stock = copy.deepcopy(current_stock)
            receipts_dict = stock_df.set_index('IPN')[week].fillna(0).to_dict() if week in stock_df.columns else {}

            # Receipts accumulate on top of carried-over stock
            for ipn, qty in receipts_dict.items():
                week_stock[ipn] = week_stock.get(ipn, 0) + qty

            # Snapshot taken AFTER receipts, BEFORE any consumption
            week_stock_before_consumption = copy.deepcopy(week_stock)

            # Production plan for this week (positive quantities only)
            production = prf_df[['Scode', week]].dropna(subset=[week])
            production.columns = ['product', 'qty']
            production = production[production['qty'] > 0]

            consumption_data = []

            # Consume materials product by product
            for product_info in production.itertuples():
                product = product_info.product
                qty = product_info.qty
                print(f"Processing product: {product}")
                product_bom = bom_df[bom_df['Scode'] == product]
                if product_bom.empty:
                    continue

                i = 0
                while i < len(product_bom):
                    row = product_bom.iloc[i]
                    material = row['SPN']

                    if row['Replacement Status'] == 'Replacement':
                        # Replacement group: this main material plus the
                        # consecutive 'Substitute' rows that follow it
                        main_material = material
                        substitutes = []
                        j = i + 1
                        while j < len(product_bom) and product_bom.iloc[j]['Replacement Type'] == 'Substitute':
                            substitutes.append(product_bom.iloc[j]['SPN'])
                            j += 1

                        required_per_unit = row['所需物料数量']
                        total_demand = required_per_unit * qty  # raw demand

                        # Only substitutes with a stock entry participate
                        valid_substitutes = [m for m in substitutes if m in week_stock]
                        group_materials = [main_material] + valid_substitutes

                        # Coverage per material: post-receipt stock / total demand
                        cover_times = {}
                        for mat in group_materials:
                            stock = week_stock_before_consumption.get(mat, 0)
                            stock = max(0, stock)
                            if mat == 'H95483-004':
                                print(mat, stock)  # debug trace for this one material
                            cover_times[mat] = stock / total_demand if total_demand != 0 else 0

                        total_cover = sum(cover_times.values())
                        if total_cover == 0:
                            # No stock anywhere: the main material absorbs all demand
                            ratios = {main_material: 1.0}
                            for mat in substitutes:
                                if mat != main_material:
                                    ratios[mat] = 0.0
                        else:
                            ratios = {mat: (ct / total_cover) for mat, ct in cover_times.items()}
                        # Allocate consumption across the group (rounded up)
                        for mat in group_materials:
                            # Consumption volume rounds up, then scrap is added
                            consume = math.ceil(total_demand * ratios[mat])
                            consume = consume + consume * scrap_ratio
                            week_stock[mat] -= consume  # negative stock is allowed

                            consumption_data.append({
                                'SPN': mat,
                                'Scode': product,
                                'Demand Qty': total_demand,  # raw total demand
                                'Consumption Ratio': ratios[mat],
                                'Consumption Volume': consume  # rounded-up actual consumption
                            })
                        i = j
                    else:
                        # Ordinary (non-replacement) material
                        required = row['所需物料数量'] * qty
                        # Ordinary materials are also rounded up
                        required_ceil = math.ceil(required)
                        consume = required_ceil + required_ceil * scrap_ratio
                        if product in REPLACEMENT_GROUP.keys() and "cs" in output_path:
                            # Consigned special case: K84041-001 fans its demand
                            # out over the configured replacement-part group
                            if material == "K84041-001":
                                bom_groups = REPLACEMENT_GROUP[product]
                                material_ratios = calculate_material_ratios(week_stock, bom_groups)
                                for mat_name, ratio in material_ratios.items():
                                    print(f"产品：{product}, 物料名：{mat_name}, 比例：{ratio}")
                                    mat_need = REPLACEMENT_PARTS[mat_name]
                                    print(f"物料名：{mat_name}, mat_need：{mat_need}")
                                    required = mat_need * qty
                                    print(f"物料名：{mat_name}, 需求量required：{required}")
                                    required_ceil = math.ceil(required)
                                    consume = required_ceil + required_ceil * scrap_ratio
                                    # NOTE(review): ratio appears to be a percentage
                                    # — floor-divided by 100 here; confirm units.
                                    mat_consume = consume * ratio//100
                                    print(f"物料名：{mat_name}, 消耗量mat_consume：{mat_consume}")
                                    if mat_name in week_stock:
                                        week_stock[mat_name] -= mat_consume
                                        consumption_data.append({
                                            'SPN': mat_name,
                                            'Scode': product,
                                            'Demand Qty': required_ceil,  # rounded-up demand
                                            'Consumption Ratio': ratio,
                                            'Consumption Volume': mat_consume
                                        })
                            # Parts of the replacement group were handled above
                            if material in REPLACEMENT_PARTS.keys():
                                i += 1
                                continue
                        if material in week_stock:
                            week_stock[material] -= consume  # negative stock is allowed
                            consumption_data.append({
                                'SPN': material,
                                'Scode': product,
                                'Demand Qty': required_ceil,  # rounded-up demand
                                'Consumption Ratio': 1.0,
                                'Consumption Volume': consume
                            })
                        i += 1

            # Build this week's output sheet
            if consumption_data:
                df = pd.DataFrame(consumption_data)
                final_df = pd.DataFrame(index=df['SPN'].unique())

                # Stock columns
                final_df['Current Inventory'] = final_df.index.map(
                    lambda x: week_stock_before_consumption.get(x, 0))
                final_df['Received Num'] = final_df.index.map(
                    lambda x: receipts_dict.get(x, 0))
                final_df['Net inventory after consumption'] = final_df.index.map(
                    lambda x: week_stock.get(x, 0))

                # Per-product demand / ratio / consumption columns
                products = production['product'].unique()
                for product in products:
                    product_data = df[df['Scode'] == product]
                    final_df[f'{product}_Demand Qty'] = product_data.groupby('SPN')['Demand Qty'].sum()
                    final_df[f'{product}_Consumption Ratio'] = product_data.groupby('SPN')['Consumption Ratio'].max()
                    final_df[f'{product}_Consumption Volume'] = product_data.groupby('SPN')['Consumption Volume'].sum()

                final_df = final_df.fillna(0).reset_index().rename(columns={'index': 'SPN'})
            else:
                # No consumption this week: still report receipts, if any
                final_df = pd.DataFrame(
                    columns=['SPN', 'Current Inventory', 'Received Num', 'Net inventory after consumption'])
                if week in stock_df.columns:
                    receipts_df = stock_df[['IPN', week]].rename(columns={'IPN': 'SPN', week: 'Received Num'})
                    receipts_df['Current Inventory'] = receipts_df['SPN'].map(current_stock) + receipts_df[
                        'Received Num']
                    receipts_df['Net inventory after consumption'] = receipts_df['Current Inventory']
                    final_df = pd.concat([final_df, receipts_df], ignore_index=True)

            # Column ordering: fixed stock columns, then per-product triples
            base_cols = ['SPN', 'Current Inventory', 'Received Num', 'Net inventory after consumption']
            product_cols = []
            for product in production['product'].unique():
                product_cols.extend([
                    f'{product}_Demand Qty',
                    f'{product}_Consumption Ratio',
                    f'{product}_Consumption Volume'
                ])
            final_df = final_df[base_cols + product_cols]

            # Write this week's sheet
            final_df.to_excel(writer, sheet_name=week, index=False)

            # Carry the post-consumption stock into next week
            current_stock = week_stock

def generate_summary(weekly_excel_path, output_path):
    """Condense the per-week detail workbook into one summary sheet.

    Every sheet named as a 6-digit week number is read; per SPN the
    opening inventory, receipts, consumption and closing inventory are
    derived and stacked into a single 'Material Consumption Report'.
    """
    # Valid week sheets: 6-digit numeric names, processed in order
    week_sheets = sorted(
        name for name in pd.ExcelFile(weekly_excel_path).sheet_names
        if name.isdigit() and len(name) == 6
    )

    records = []
    for week in week_sheets:
        detail = pd.read_excel(weekly_excel_path, sheet_name=week)
        detail['Initial Inventory'] = detail['Current Inventory'] - detail['Received Num']
        detail['Closing Inventory'] = detail['Net inventory after consumption']
        detail['Consumption Volume'] = (
            detail['Initial Inventory'] + detail['Received Num'] - detail['Closing Inventory']
        )
        # One record per week, tagged with the week label
        week_record = detail[[
            'SPN', 'Initial Inventory', 'Received Num',
            'Consumption Volume', 'Closing Inventory'
        ]].copy()
        week_record['Week'] = week
        records.append(week_record)

    # Stack all weeks and fix the column order
    report = pd.concat(records)[[
        'Week', 'SPN', 'Initial Inventory',
        'Received Num', 'Consumption Volume', 'Closing Inventory'
    ]]

    report.to_excel(output_path, sheet_name='Material Consumption Report', index=False)


def process_data(data_type, data_paths, end_week, upside_data=None):
    """Top-level pipeline: BOM + SSDD + PRF -> weekly consumption reports.

    'CS' data types with consigned data are routed entirely to
    process_cs_data. Otherwise the turnkey pipeline runs through
    end_week and writes detail/summary (and optionally upside) workbooks
    into data_paths['output'].

    Returns:
        (code, message): code 0 on success, 1 on failure.
    """
    bom_path = data_paths.get('bom')
    stock_path = data_paths.get('ssdd')
    cs_ssdd_path = data_paths.get('consigned_ssdd')
    # Consigned ('CS') runs delegate fully to the consigned pipeline,
    # carrying the upside data with them.
    if "CS" in data_type and cs_ssdd_path:
        cs_upside_data = upside_data
        tk_upside_data = None
        data_type = data_type.split("(")[0]
        return process_cs_data(data_type, data_paths, end_week, cs_upside_data)
    else:
        cs_upside_data = None
        tk_upside_data = upside_data
    ret_code, ret_info = 0, 'Success'
    # Non-CS runs still trigger a consigned pass (without upside) when
    # consigned data exists and data_type has no '(...)' suffix.
    if cs_ssdd_path and "(" not in data_type:
        ret_code, ret_info =process_cs_data(data_type, data_paths, end_week, None)
    if not stock_path:
        return ret_code, ret_info
    prf_path = data_paths.get('prf')
    dcr_path = data_paths.get('dcr')
    inventory_path = data_paths.get('inventory')
    output_path = data_paths.get('output')
    # Strip any '(...)' suffix, e.g. 'PTI(Upside)' -> 'PTI'
    data_type = data_type.split("(")[0]
    bom_df = process_bom_data(bom_path, data_type, tk_upside_data)
    if data_type == 'PTI':
        stock_df, lead_time_dict = process_pti_ssdd_data(stock_path, inventory_path)
    else:
        stock_df, lead_time_dict = process_pega_ssdd_data(stock_path, inventory_path)
    # Truncate stock and plan frames at end_week (inclusive)
    stock_df = stock_df.iloc[:, :stock_df.columns.get_loc(end_week) + 1]
    prf_df = process_prf_data(prf_path, dcr_path, data_type, tk_upside_data)
    prf_df = prf_df.iloc[:, :prf_df.columns.get_loc(end_week) + 1]
    detail_output_path = output_path + f'/{data_type}-detail_output.xlsx'
    try:
        calculate_weekly_consumption(
            bom_df=bom_df,
            stock_df=stock_df,
            prf_df=prf_df,
            output_path=detail_output_path,
            end_week=end_week  # newly added parameter
        )
        materials_summary_output_path = output_path + f'/{data_type}-materials_summary_output.xlsx'
        products_summary_output_path = output_path + f'/{data_type}-products_summary_output.xlsx'
        generate_summary(detail_output_path, materials_summary_output_path)
        report = generate_production_report(prf_df, bom_df, detail_output_path, materials_summary_output_path,
                                            products_summary_output_path, lead_time_dict, tk_upside_data)
        if tk_upside_data:
            # upside_products = [product for product in upside_data.get('Scode', [])]
            upside_file_path = output_path + f'/{data_type}-upside_output.xlsx'
            save_df_with_format(report, upside_file_path)

    except Exception as e:
        print(f"An error occurred: {e}")
        import traceback
        print(traceback.format_exc())
        return 1, f"Failed: {e}"
    return 0, 'Success'


def process_cs_data(data_type, data_paths, end_week, upside_data=None):
    """Run the consigned-materials (CS) calculation pipeline for one ODM.

    Loads the CS BOM, the consigned SSDD stock and the PRF plan, trims both
    to *end_week*, computes weekly consumption plus summary workbooks, and
    builds the production report. When *upside_data* is truthy an extra
    upside report file is written as well.

    :param data_type: ODM identifier; 'PTI' selects the PTI readers,
        anything else falls through to the PEGA readers
    :param data_paths: dict with keys 'bom', 'consigned_ssdd', 'prf',
        'dcr', 'inventory', 'output'
    :param end_week: last week column (YYYYWW string) to keep
    :param upside_data: optional upside demand specification
    :return: (0, 'Success') on success, (1, message) on failure
    """
    bom_file = data_paths.get('bom')
    cs_stock_file = data_paths.get('consigned_ssdd')

    prf_file = data_paths.get('prf')
    dcr_file = data_paths.get('dcr')
    inventory_file = data_paths.get('inventory')  # fetched for symmetry; the CS readers take None below
    out_dir = data_paths.get('output')
    cs_bom = process_cs_bom_data(bom_file, upside_data)

    if data_type == 'PTI':
        cs_stock, lead_times = process_consigned_pti_ssdd(cs_stock_file, None)
    else:
        cs_stock, lead_times = process_consigned_pega_ssdd(cs_stock_file, None)
    # Trim both frames so the last column is end_week
    cs_stock = cs_stock.iloc[:, :cs_stock.columns.get_loc(end_week) + 1]
    plan = process_prf_data(prf_file, dcr_file, data_type, upside_data)
    plan = plan.iloc[:, :plan.columns.get_loc(end_week) + 1]
    detail_file = out_dir + f'/{data_type}-cs-detail_output.xlsx'
    try:
        calculate_weekly_consumption(
            bom_df=cs_bom,
            stock_df=cs_stock,
            prf_df=plan,
            output_path=detail_file,
            end_week=end_week,
        )
        materials_summary_file = out_dir + f'/{data_type}-cs-materials_summary_output.xlsx'
        products_summary_file = out_dir + f'/{data_type}-cs-products_summary_output.xlsx'
        generate_summary(detail_file, materials_summary_file)
        report = generate_production_report(plan, cs_bom, detail_file, materials_summary_file,
                                            products_summary_file, lead_times, upside_data)
        if upside_data:
            # upside_products = [product for product in upside_data.get('Scode', [])]
            upside_file = out_dir + f'/{data_type}-upside_output.xlsx'
            save_df_with_format(report, upside_file)

    except Exception as e:
        print(f"An error occurred: {e}")
        import traceback
        print(traceback.format_exc())
        return 1, f"Failed: {e}"
    return 0, 'Success'
def generate_production_report(prf_df, bom_df, detail_path, summary_path, output_path, lead_time_dict, upside_data):
    """Simulate weekly production against material stock and build the full-kit report.

    For every (week, product) pair in the PRF plan, walks the product's BOM,
    compares each material's available stock to its recorded consumption,
    records shortages, and derives the achievable ("full kit") quantity and
    rate. Future replenishments are then allocated to shortfall weeks and an
    adjusted rate is merged back into the report.

    Side effects: mutates the module-level PCB_DICT / OPO_DICT / SLT_DICT
    and writes the formatted report to *output_path*.

    :param prf_df: production plan; column 0 is 'Scode', remaining columns
        are week labels (YYYYWW)
    :param bom_df: BOM with 'Scode', 'SPN' and '所需物料数量' (unit need)
    :param detail_path: per-week consumption detail workbook (one sheet per week)
    :param summary_path: material summary workbook (Week / SPN / inventory columns)
    :param output_path: destination for the formatted report
    :param lead_time_dict: SPN -> lead time in days
    :param upside_data: truthy when running an upside scenario (changes how
        shortages are annotated)
    :return: the report rows whose S-Code contains the '(Upside)' marker
    """
    # Load inputs
    summary_df = pd.read_excel(summary_path)
    detail_dfs = pd.read_excel(detail_path, sheet_name=None)  # {week sheet name: DataFrame}
    # Debug anchors: extra tracing is printed only for this product/material pair
    critical_product = 'AA001990U'
    critical_material = 'AA000503S'
    print(len(missing_ipns))
    # SPN -> remaining stock while processing the current week (reset each week)
    realtime_material_stock = {}
    report = []
    start_week = int(prf_df.columns[1])
    # SPN -> initial inventory at the first planning week
    material_inventory = (
        summary_df.query("Week == @start_week")
        .set_index('SPN')['Initial Inventory']
        .to_dict()
    )
    product_plan = defaultdict(dict)
    for week in prf_df.columns[2:]:  # iterate the week columns
        if week not in detail_dfs:
            continue  # skip weeks that have no detail sheet

        week_detail = detail_dfs[week]
        week_summary = summary_df[summary_df['Week'] == int(week)]
        # print(f"Processing week {week}  type:{type(week)}")
        if critical_material in material_inventory:
            print(f"{critical_material}库存更新为: {material_inventory[critical_material]}")
        print("==" * 20 + f"Processing week {week}" + "==" * 20)

        for product in prf_df['Scode'].unique():
            print(f"Processing product {product}")
            material_not_found = False
            # if product != critical_product:
            #     continue
            plan_qty = prf_df.loc[prf_df['Scode'] == product, week].values[0]
            if plan_qty <= 0:
                continue

            # BOM requirements: SPN -> units of material per product unit
            bom = bom_df[bom_df['Scode'] == product]
            required_materials = bom[['SPN', '所需物料数量']].set_index('SPN').to_dict()['所需物料数量']

            actual_qty = float('inf')  # start unconstrained; each material can only lower it
            shortage_info = []
            shortage_data = []
            # Upside rows with no dedicated consumption column carry no data this week
            if UPSIDE in product and f'{product}_Consumption Volume' not in week_detail.columns:
                continue
            for material, unit_need in required_materials.items():
                # if unit_need <= 0:  # 跳过替代料占位符
                #     continue

                ##############################################
                # Key point: handle materials absent from the detail sheet
                ##############################################
                # Closing inventory comes from the summary sheet
                stock_info = week_summary[week_summary['SPN'] == material]
                if material == critical_material:
                    print(f"{material} len(stock_info): {len(stock_info)}")
                if material not in realtime_material_stock:
                    # First touch this week: seed from initial inventory + receipts
                    start_stock = stock_info['Initial Inventory'].values[0] if not stock_info.empty else 0
                    supply_qty = stock_info['Received Num'].values[0] if not stock_info.empty else 0
                    ending_stock = start_stock + supply_qty
                    realtime_material_stock[material] = ending_stock
                else:
                    ending_stock = realtime_material_stock[material]
                if material == critical_material:
                    print(f"{material} ending stock: {ending_stock}")
                # Consumption figures come from the detail sheet (absent -> 0)
                consumed_rate = 0
                demand_qty = 0
                if material in week_detail['SPN'].values:
                    consumed = get_consumption_value(week_detail, material, product, 'Consumption Volume')
                    consumed = max(0, consumed)
                    demand_qty = get_consumption_value(week_detail, material, product, 'Demand Qty')
                    demand_qty = max(0, demand_qty)
                    realtime_material_stock[material] -= consumed
                    if material == critical_material:
                        print(f"{material} consumed: {consumed}")
                    consumed_rate = get_consumption_value(week_detail, material, product, 'Consumption Ratio')
                    consumed_rate = max(0, consumed_rate)
                    if consumed_rate == 0:
                        continue
                    if material == critical_material:
                        print(f"{material} consumed rate: {consumed_rate}")
                    if UPSIDE in product:
                        # Track PCB/LTIM shortfalls separately for upside scenarios
                        if material in PCB_LTIM_LIST and material in missing_ipns:
                            PCB_DICT[material] = round(max((consumed - ending_stock), 0), 4)
                    if consumed > 0 and ending_stock < consumed:
                        all_shortage = round(min((consumed - ending_stock), consumed), 4)
                        shortage_item = f"{material}(Shortage{all_shortage})"
                        # 1.003 looks like a scrap/attrition factor — TODO confirm
                        shortage_data_item = {'material': material,
                                              'shortage': all_shortage,
                                              'unit_need': float(unit_need * consumed_rate * 1.003)}
                        if UPSIDE in product:
                            print(f"{Separator} {UPSIDE} all shortage: {Separator}")
                            print(f"{week} all shortage: {material}")
                            # Upside shortage is not capped at the consumed amount
                            all_shortage = round((consumed - ending_stock), 4)
                            shortage_item = f"{material}(Shortage{all_shortage})"
                            print(f"{material} all shortage: {all_shortage}")
                            print(f"{Separator} all shortage: {Separator}")

                            if material in missing_ipns:
                                shortage_item += f"(no record in SSDD)"
                                if material in PCB_LTIM_LIST:
                                    pass
                                    # shortage_item += f"->PCB or LTIM "
                                else:
                                    OPO_DICT[material] = all_shortage
                        shortage_info.append(shortage_item)
                        shortage_data.append(shortage_data_item)
                    else:
                        if UPSIDE in product:
                            if material in missing_ipns:
                                shortage_item = f"{material}(no record in SSDD)"
                                shortage_info.append(shortage_item)
                    # Convert consumption back to product units via the ratio
                    if consumed_rate > 0:
                        consumed = consumed // consumed_rate
                    else:
                        continue
                else:
                    if unit_need > 0:
                        material_not_found = True
                        shortage_info.append(f"{material}(No Material record in SSDD)")

                # Usable quantity (negative stock treated as zero)
                available = ending_stock
                if material == critical_material:
                    print(f"{material} available: {available}")
                if available < 0:
                    available = 0

                # Theoretical maximum output this material allows
                if consumed_rate > 0:
                    available = available // consumed_rate
                    # Effective per-unit need derived from this week's demand
                    unit_need = demand_qty // plan_qty
                else:
                    continue

                max_possible = available // unit_need if unit_need > 0 else available

                lead_time = lead_time_dict.get(material)
                if max_possible < plan_qty and lead_time:
                    lead_week = math.ceil(lead_time / 7)
                    if int(week) - start_week > lead_week:
                        # Beyond the lead-time horizon the material is replenishable (SLT)
                        max_possible = plan_qty
                        SLT_DICT[material] = 'SLT'
                        if not upside_data:
                            if len(shortage_info) >= 1 and "SLT" not in shortage_info[-1]:
                                shortage_info[-1] = f"{shortage_info[-1]}-SLT"
                if material == critical_material:
                    print(f"{material} unit_need: {unit_need}")
                    print(f"{material} max possible: {max_possible}")

                # Tighten the achievable output to the most constraining material
                if max_possible < actual_qty:
                    if material == critical_material:
                        print(f"{material} max possible updated to {max_possible}")
                    actual_qty = max_possible
                    if material == critical_material:
                        print(f"{material} actual_qty updated to {actual_qty}")
                    # if available < unit_need * plan_qty:
                    #     shortage = unit_need * plan_qty - available
                    #     shortage_info.append(f"{material}(缺{shortage})")

            # Final output correction: never exceed the plan
            actual_qty = min(actual_qty, plan_qty) if actual_qty != float('inf') else plan_qty
            # NOTE(review): 'not material_not_found > 0' parses as
            # 'not (material_not_found > 0)'; the '> 0' was likely meant for
            # plan_qty. Net effect equals 'plan_qty and not material_not_found' — confirm.
            achievement_rate = actual_qty / plan_qty if plan_qty and not material_not_found > 0 else 0

            print(f"{product} achieved rate: {achievement_rate}")
            report.append({
                'Week': week,
                'S-Code': product,
                'Baseline Demand': plan_qty,
                'Full kit Num': actual_qty,
                'Full kit Rate': f"{round(achievement_rate * 100, 2)}%",
                'Shortage Materials': ', '.join(shortage_info) if shortage_info else 'No Shortage'
            })
            product_plan[product][week] = {
                'Baseline Demand': plan_qty,
                'Full kit Num': actual_qty,
                'Full kit Rate': achievement_rate,
                'shortage_info': shortage_data
            }
        realtime_material_stock.clear()  # stock tracking is per-week
    replenishment = {}
    # Positional itertuples access: assumes summary columns are ordered
    # (Week, SPN, <...>, Received Num) — verify if the schema changes
    for row in summary_df.itertuples():

        week = row[1]
        spn = row[2]
        received_num = row[4]
        # create the week's bucket if missing
        if week not in replenishment:
            replenishment[week] = {}

        # record only materials with positive incoming quantity
        if received_num > 0:
            replenishment[week][spn] = int(received_num)
    print("replenishment: ", replenishment)
    updated = allocate_replenishments_future_only(product_plan, replenishment)
    report = update_report(report, updated)
    report_df = pd.DataFrame(report)
    if upside_data:
        report_df = process_shortage_materials(report_df, lead_time_dict)
    save_df_with_format(report_df, output_path)
    upside_df = report_df[report_df['S-Code'].astype(str).str.contains(UPSIDE, na=False)]
    return upside_df


def validate_and_insert_data(filtered_df, input_data):
    """Append long-format demand rows to a wide (Scode x week) table.

    Every week name in *input_data* must already exist as a column of
    *filtered_df*; the long-format input is then pivoted to wide format
    and concatenated below the original table.

    :param filtered_df: wide DataFrame — first column 'Scode', remaining
        columns are YYYYWW week names
    :param input_data: long DataFrame with columns 'Scode', '周名称', '数量'
    :return: concatenated DataFrame (unfilled cells stay NaN)
    :raises ValueError: if any input week is not a column of filtered_df
    """
    # --- Validation ---
    # Week columns of the original table (first column is assumed to be Scode)
    known_weeks = set(filtered_df.columns[1:])

    # Every incoming week must already be a column
    unknown_weeks = [wk for wk in input_data['周名称'].unique().tolist() if wk not in known_weeks]
    if unknown_weeks:
        raise ValueError(f"The following weeks is out of range.: {unknown_weeks}")

    # --- Reshape: long -> wide ---
    wide_input = (
        input_data
        .pivot(index='Scode', columns='周名称', values='数量')
        .reset_index()
        .rename_axis(None, axis=1)
    )

    # --- Merge: stack the new rows under the original table ---
    return pd.concat([filtered_df, wide_input], ignore_index=True)


def duplicate_bom_with_upside(product_codes: list, bom_df: pd.DataFrame) -> pd.DataFrame:
    """Duplicate BOM rows for the given Scodes with an '(Upside)' suffix.

    Copies every row whose 'Scode' is in *product_codes*, renames each
    copy's Scode to '<code>(Upside)', and appends the copies after the
    original rows.

    Args:
        product_codes (list): Scodes to duplicate (must all exist in bom_df)
        bom_df (pd.DataFrame): original BOM table (must contain 'Scode')

    Returns:
        pd.DataFrame: original rows followed by the suffixed copies.

    Raises:
        ValueError: if product_codes is empty or contains unknown Scodes.
    """
    # Validate non-empty input
    if not product_codes:
        raise ValueError("S-code cannot be empty")

    # set() gives O(1) membership tests instead of scanning the array per code
    existing_products = set(bom_df['Scode'].unique())

    # Reject codes that are not present in the table
    invalid_codes = [code for code in product_codes if code not in existing_products]
    if invalid_codes:
        raise ValueError(f"The following S-codes do not exist: {invalid_codes}")

    # copy() avoids chained-assignment warnings when renaming below
    filtered = bom_df[bom_df['Scode'].isin(product_codes)].copy()

    # Tag the duplicated rows as upside demand
    filtered.loc[:, 'Scode'] = filtered['Scode'] + '(Upside)'

    # Append the copies to the original table
    new_bom = pd.concat([bom_df, filtered], ignore_index=True)

    return new_bom


def filter_report_by_s_code(s_codes: list, report: list) -> list:
    """Keep only report entries whose 'S-Code' appears in *s_codes*.

    Args:
        s_codes (list): S-Codes to retain (e.g. ['P1001', 'P1002'])
        report (list): report rows as dicts

    Returns:
        list: matching entries, original order preserved

    Example:
        >> report = [
        ...    {'Week': '202401', 'S-Code': 'P1001', 'PRF': 100},
        ...    {'Week': '202402', 'S-Code': 'P1002', 'PRF': 200}
        ... ]
        >> filter_report_by_s_code(['P1001'], report)
        [{'Week': '202401', 'S-Code': 'P1001', 'PRF': 100}]
    """
    # Set membership makes each lookup O(1)
    wanted = set(s_codes)
    kept = []
    for entry in report:
        if entry.get('S-Code') in wanted:
            kept.append(entry)
    return kept


def process_spacial_data(df):
    """Patch the IPN of the special alternate-group placeholder row.

    Looks for rows whose 'Group ID' equals SPACIAL_ALT_ID and whose values
    from the third column onward are all zero; the first such row has its
    'IPN' replaced with SPACIAL_IPN. The frame is modified in place and
    returned.
    """
    # Condition 1: row belongs to the special alternate group
    is_special_group = df['Group ID'] == SPACIAL_ALT_ID
    # Condition 2: everything from the third column on is zero
    tail_all_zero = (df.iloc[:, 2:] == 0).all(axis=1)

    hits = is_special_group & tail_all_zero

    # Rewrite the IPN of the first matching row, if any
    if hits.any():
        df.loc[df[hits].index[0], 'IPN'] = SPACIAL_IPN

    return df


def get_consumption_value(df, material, product, column_suffix):
    """Read one value from the per-product detail column.

    Looks up column '<product>_<column_suffix>' on the row whose SPN equals
    *material*; returns 0 when the column or the row is absent.
    """
    column = f'{product}_{column_suffix}'
    if column not in df.columns:
        print(f"Getting consumption value for {material} in {product} with suffix {column_suffix}")
        return 0
    matches = df.loc[df['SPN'] == material, column].values
    if len(matches) > 0:
        return matches[0]
    return 0


def create_unique_dict(df: pd.DataFrame, key_col: str, value_col: str) -> dict:
    """Build a {key_col: max(value_col)} mapping, skipping null rows.

    Rows where either column is null are dropped; duplicate keys are
    collapsed by keeping the maximum value.

    Args:
        df: source DataFrame
        key_col: column whose values become dict keys
        value_col: column whose values become dict values

    Returns:
        dict: deduplicated mapping without null entries
    """
    # Restrict to the two columns of interest and drop incomplete rows
    pairs = df[[key_col, value_col]].copy().dropna(subset=[key_col, value_col])
    # Collapse duplicate keys by taking the maximum value
    return pairs.groupby(key_col)[value_col].max().to_dict()


def update_report(report, updated):
    """Merge adjusted full-kit results back into the flat report rows.

    For every report entry whose product/week appears in *updated*, adds
    'Adjusted Full kit Rate' (formatted as a percentage string) and
    'Recovery Week'. Entries without a match are left untouched.

    :param report: list of report dicts (keys 'S-Code', 'Week', ...)
    :param updated: {product: {week: {'Adjusted Full kit Rate', 'update_from'}}}
    :return: the same list, mutated in place
    """
    for row in report:
        product = row['S-Code']
        week_key = str(row['Week'])  # normalize so it matches updated's string keys

        per_product = updated.get(product)
        if per_product is None or week_key not in per_product:
            continue

        adjusted = per_product[week_key]
        # float -> 'NN.NN%' string
        rate = adjusted.get("Adjusted Full kit Rate", 0.0)
        row['Adjusted Full kit Rate'] = f"{round(rate * 100, 2)}%"
        row['Recovery Week'] = adjusted.get("update_from", "")

    return report


def allocate_replenishments_future_only(product_plan: dict, replenishment: dict) -> dict:
    """Allocate future-week replenishments to earlier shortfall weeks.

    For each product/week whose full-kit quantity is below baseline, scans
    strictly later replenishment weeks and converts incoming material into
    additional buildable units, writing 'Adjusted Full kit Num',
    'Adjusted Full kit Rate' and 'update_from' (the last replenishment
    week used) into a deep copy of *product_plan*.

    NOTE: a helper key '_sorted_weeks' is inserted per product and remains
    in the returned dict; downstream lookups by week key are unaffected.

    :param product_plan: {product: {week: {'Baseline Demand',
        'Full kit Num', 'shortage_info': [...]}}}
    :param replenishment: {week: {SPN: incoming quantity}}
    :return: deep-copied product_plan with adjusted figures
    """
    updated = copy.deepcopy(product_plan)
    # Mutable per-week stock of incoming material, consumed during allocation
    week_stock = {wk: {mat: int(q) for mat, q in replenishment[wk].items()} for wk in replenishment}

    # Week label -> int for ordering; non-numeric labels pass through unchanged
    def w2i(w):
        try:
            return int(w)
        except:
            return w

    # Sort weeks within each product, excluding non-week fields
    product_order = list(product_plan.keys())
    for pid in updated:
        weeks = list(updated[pid].keys())
        # NOTE(review): 'w.isdigit() or w2i(w)' keeps any truthy label because
        # w2i returns the original string on failure — confirm this is intended
        week_keys = [w for w in weeks if w.isdigit() or w2i(w)]
        updated[pid]['_sorted_weeks'] = sorted(week_keys, key=w2i)

    # Products ordered by their earliest week; ties keep original order
    pids_sorted = sorted(product_order, key=lambda pid: (
        min(w2i(w) for w in updated[pid]['_sorted_weeks']),  # earliest week
        product_order.index(pid)
    ))

    # Main allocation loop
    for pid in pids_sorted:
        pdata = updated[pid]
        for week in pdata['_sorted_weeks']:
            week_data = pdata[week]
            pweek = w2i(week)
            baseline = int(week_data.get('Baseline Demand', 0))
            fullkit = int(week_data.get('Full kit Num', 0))

            # Initialize adjusted figures to the unadjusted ones
            week_data['Adjusted Full kit Num'] = fullkit
            week_data['Adjusted Full kit Rate'] = round(fullkit / baseline, 3) if baseline > 0 else 0

            if fullkit >= baseline:
                continue

            # Shortage details for this week (deep-copied; mutated below)
            shortages = copy.deepcopy(week_data.get('shortage_info', []))
            if not shortages:
                continue

            # Only strictly later weeks may supply replenishment
            future_weeks = sorted([wk for wk in week_stock.keys() if w2i(wk) > pweek], key=w2i)
            last_update_week = None

            # Allocate from each replenishment week in chronological order
            for rw in future_weeks:
                per_material_units = []
                for s in shortages:
                    mat = s['material']
                    unit_need = int(s['unit_need'])
                    shortage_qty = int(s['shortage'])
                    inv_qty = week_stock[rw].get(mat, 0)

                    if unit_need <= 0:
                        per_material_units.append(0)
                        continue
                    # Buildable units limited by both incoming stock and the recorded shortage
                    per_material_units.append(min(inv_qty // unit_need, shortage_qty // unit_need))

                if not per_material_units:
                    continue

                # The scarcest material bounds the added units; never exceed baseline
                add_units = min(per_material_units)
                slack = baseline - fullkit
                add_units = min(add_units, slack)

                if add_units <= 0:
                    continue

                # Consume stock and shrink the outstanding shortages
                for s in shortages:
                    mat = s['material']
                    unit_need = int(s['unit_need'])
                    use_qty = add_units * unit_need
                    s['shortage'] = max(0, int(s['shortage']) - use_qty)
                    week_stock[rw][mat] = max(0, week_stock[rw].get(mat, 0) - use_qty)

                fullkit += add_units
                week_data['Adjusted Full kit Num'] = fullkit
                week_data['Adjusted Full kit Rate'] = round(fullkit / baseline, 3) if baseline > 0 else 0
                last_update_week = rw

                if fullkit >= baseline:
                    break

            if last_update_week:
                week_data['update_from'] = last_update_week

    return updated


def process_shortage_materials(df, lead_time_dict):
    """Explode the 'Shortage Materials' column into one row per material.

    Splits the comma-separated shortage entries, extracts SPN and shortage
    quantity from each '<SPN>(Shortage<qty>)' token, and enriches every row
    with category, unit price, amount, lead time and SLT remark drawn from
    the module-level CATEGORY_DICT / UP_DICT / SLT_DICT.

    :param df: report DataFrame containing a 'Shortage Materials' column
    :param lead_time_dict: SPN -> lead time in days
    :return: exploded and enriched DataFrame with a fresh index
    """

    # Work on a copy so the caller's frame is untouched
    result_df = df.copy()

    # Step 1: split the comma-separated string and explode into rows
    result_df = (
        result_df.assign(
            # each cell becomes a list of '<SPN>(Shortage<qty>)' tokens
            Materials_List=lambda x: x['Shortage Materials'].str.split(',')
        )
        # one row per token
        .explode('Materials_List')
        # keep the pre-explode index around as 'original_index'
        .reset_index(drop=False)
        .rename(columns={'index': 'original_index'})
    )

    # Step 2: extract the SPN and the shortage quantity
    pattern = r'^(.*?)\(Shortage([\d.]+)\)$'
    extracted = result_df['Materials_List'].str.extract(pattern)

    # Step 3: attach the extracted columns
    result_df = pd.concat([
        result_df,
        extracted.rename(columns={0: 'SPN', 1: 'Shortage_Qty'})
    ], axis=1)

    # Step 4: clean up — quantities become negative ints (shown as a deficit)
    result_df['SPN'] = result_df['SPN'].str.strip()
    result_df['Shortage_Qty'] = pd.to_numeric(result_df['Shortage_Qty'], errors='coerce')
    result_df['Shortage_Qty'] = result_df['Shortage_Qty'].fillna(0)
    result_df['Shortage_Qty'] = (-result_df['Shortage_Qty']).astype(int)

    # Step 5: build a normalized key (strip everything after the first '/' or '_')
    # in a scratch column so the displayed SPN stays untouched
    result_df['SPN_for_match'] = (
        result_df['SPN']
        .astype(str)  # force string dtype before splitting
        .str.split("/")
        .str[0]
        .str.split("_")
        .str[0]
    )


    # Unmatched keys fall back to '' (category) / 0 (unit price)
    result_df['Category'] = result_df['SPN_for_match'].map(
        lambda x: CATEGORY_DICT.get(x, '') if pd.notnull(x) else ''
    )
    result_df['U/P'] = result_df['SPN_for_match'].map(
        lambda x: UP_DICT.get(x, 0) if pd.notnull(x) else 0
    )

    # Coerce to numeric (invalid values become NaN)
    result_df['U/P_num'] = pd.to_numeric(result_df['U/P'], errors='coerce')
    result_df['Shortage_Qty_num'] = pd.to_numeric(result_df['Shortage_Qty'], errors='coerce')

    condition = (
            result_df['Shortage_Qty_num'].notna() &
            result_df['U/P_num'].notna() &
            (result_df['U/P_num'] != 0)
    )

    result_df['Amount'] = np.where(
        condition,
        result_df['U/P_num'] * result_df['Shortage_Qty_num'],  # numeric product only when both sides are valid
        ""
    )

    result_df['LT（Days）'] = result_df['SPN'].map(lead_time_dict).fillna('')

    # Step 6: SLT remark looked up from SLT_DICT
    result_df['Remark'] = result_df['SPN'].map(SLT_DICT).fillna('')

    result_df.loc[result_df['U/P'] == "Consign", 'Amount'] = "CS Materials"


    # Step 7: drop the helper columns
    result_df = result_df.drop(columns=[
        'original_index',
        'Materials_List',
        'Shortage Materials',
        'SPN_for_match',
        'Shortage_Qty_num',
        'U/P_num',
    ])  # the extracted SPN column is retained
    # NOTE(review): this re-coercion turns the "CS Materials" marker rows
    # set above into NaN — confirm that is intended
    result_df["Amount"] = pd.to_numeric(result_df["Amount"], errors="coerce")

    # Drop rows with no SPN whose Full kit Rate is not exactly '100.0%'
    result_df = result_df[~((result_df['SPN'].isna()) & (result_df['Full kit Rate'] != '100.0%'))]
    # Reset the index and return
    return result_df.reset_index(drop=True)


def save_df_with_format(df, output_path):
    """Write *df* to an Excel file with presentation formatting.

    Formatting applied:
    1. thin border on every cell (header + data)
    2. centered alignment everywhere
    3. thousands separator ('#,##0') on a fixed list of quantity columns
    4. '#,##0.00' on 'Amount' cells whose value parses as a float
    5. fixed column width of 20 for every column
    """
    with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
        df.to_excel(writer, index=False, sheet_name='Sheet1')
        sheet = writer.sheets['Sheet1']

        # Shared style objects
        thin = Side(style='thin')
        box = Border(left=thin, right=thin, top=thin, bottom=thin)
        centered = Alignment(horizontal='center', vertical='center')

        # Columns that get a thousands separator
        comma_columns = ["Baseline Demand", "Full kit Num", "Shortage_Qty"]

        total_rows = len(df) + 1  # header row + data rows
        for row_idx in range(1, total_rows + 1):
            for col_idx, header in enumerate(df.columns, 1):
                cell = sheet.cell(row=row_idx, column=col_idx)
                cell.border = box
                cell.alignment = centered

                # Number formats apply to data rows only (header is row 1)
                if header in comma_columns and row_idx > 1:
                    cell.number_format = '#,##0'  # thousands-separated integer
                elif header == "Amount":
                    try:
                        float(cell.value)
                    except (TypeError, ValueError):
                        pass
                    else:
                        # thousands separator + 2 decimal places
                        cell.number_format = '#,##0.00'



        # Fixed width for every column
        for col_idx in range(1, len(df.columns) + 1):
            letter = get_column_letter(col_idx)
            # sheet.column_dimensions[letter].width = len(str(df.columns[col_idx - 1])) + 2
            sheet.column_dimensions[letter].width = 20


# 使用示例

if __name__ == '__main__':
    # Ad-hoc driver: hard-coded local test data paths for a manual PTI run.
    pega_ssdd_data_path = r'../data/PEGA_SSDD WW25.xlsx'
    pti_ssdd_data_path = r'../data/1018/PTI_SSDD_W39 PRF.XLSX'
    cs_pti_ssdd_data_path = r'../data/1018/Solidigm_PTI CS Shortage Report_2025_WW39_0924.xlsx'
    pega_prf_data_path = r'../new_data/PEGA PRF W06.xlsx'
    pti_prf_data_path = r'../data/1018/PTI_ODM_PRF_WW39.xlsx'
    bom_data_path = r'../data/1018/BOM combine review BB.xlsx'
    pti_inventory_path = r'../test/0908/20250826_(To Solidigm)_Aging & Expired Report.xlsx'
    pega_inventory_path = r'../test/PEGA inventory-2025.07.xlsb'
    pti_dcr_data_path = r"../test/DCR_TW02_NPSG_WW30.2'25_V2.xlsx"
    output_path = r'../data/1018'
    data_type = 'PTI'
    detail_output_path = r'../data/detail_output.xlsx'
    end_week = '202542'
    date_time_str = datetime.now().strftime('%Y%m%d%H%M%S')
    # Manually crafted upside demand: parallel lists keyed by row position
    # (keys '周名称'/'数量' are consumed downstream; do not rename)
    upside_data = {
        "ODM": [
            "PTI", "PTI",
        ],
        "Scode": [
            "AA001559J", "AA001559J",
        ],
        "周名称": [
            "202545", "202546",
        ],
        "数量": [
            30000, 35000,
        ],
        # "is_lead_time": [
        #     True
        # ]
    }
    # upside_data = None
    # inventory_df, lead_time_dict = process_pega_ssdd_data(pega_ssdd_data_path, pega_inventory_path)
    # inventory_df.to_excel(f'../test/pega_stock-{date_time_str}.xlsx', index=False)

    # result = process_replace_data(bom_data_path)
    # production_plan_df = process_prf_data(pti_prf_data_path, pti_dcr_data_path, data_type)
    # bom_df = process_bom_data(bom_data_path, data_type)
    # pti_stock_df, pti_lead_time_dict = process_pti_ssdd_data(pti_ssdd_data_path, pti_inventory_path)
    # pti_stock_df.to_excel(f'../test/pti_stock-{date_time_str}.xlsx', index=False)
    # production_plan_df.to_excel(f'../test/production_plan_df-{date_time_str}.xlsx', index=False)
    # bom_df.to_excel(f'{output_path}/bom_dfsss.xlsx', index=False)
    # read_pti_inventory_data(pti_inventory_path)
    # read_dcr_data(pti_dcr_data_path, data_type)
    # print(result)  # print the filtered result
    # # Run the calculation and write the outputs
    data_paths = {
        'bom': bom_data_path,
        'ssdd': pti_ssdd_data_path,
        'consigned_ssdd': cs_pti_ssdd_data_path,
        'prf': pti_prf_data_path,
        'inventory': pti_inventory_path,
        'dcr': '',
        'output': output_path,
    }
    process_data(data_type, data_paths, end_week, upside_data)
    # process_cs_data(data_type, data_paths, end_week, upside_data=None)
    # Dump the module-level shortage trackers filled during the run
    print(f"PCB:{PCB_DICT}, OPO:{OPO_DICT}, SLT:{SLT_DICT}")
