import traceback
import numpy as np
import pandas as pd
from datetime import datetime, timedelta


def process_sheet_data(ssdd_df, indexes):
    """
    Extract the Supply / Ending-OH rows from one PTI SSDD sheet and aggregate
    them per IPN.

    Args:
        ssdd_df: raw sheet as read by pd.read_excel; the real header is the
            sheet's third row (DataFrame row index 2).
        indexes: (start_week_index, ipn_index, pf_index) — column positions of
            the first weekly column, the IPN column and the content-label
            ("Product Family") column.

    Returns:
        pd.DataFrame: one row per IPN with columns 'IPN', 'BOH' and weekly
        columns named YYYYWW (ISO year/week); capped at 50 columns total.
    """
    start_week_index, ipn_index, pf_index = indexes
    # Content-label column (two positions left of the first week column),
    # taken from row index 2 downward — rows above are headers.
    column_j_data = ssdd_df.iloc[2:, start_week_index - 2]  # 从第三行开始

    # Row labels to keep. NOTE: the trailing space in "Supply(CFM) " is
    # intentional — it must match the sheet text exactly.
    target_values = ["Supply(CFM) ", "Ending OH (all source)"]

    # Boolean mask over the content-label column (index-aligned with the
    # iloc[2:] slice below).
    mask = column_j_data.isin(target_values)

    # Keep only the matching rows (all columns), starting at the header row.
    filtered_data = ssdd_df.iloc[2:][mask]

    # Reset the index so positional drops below behave predictably.
    filtered_data = filtered_data.reset_index(drop=True)

    # Use the sheet's third row as the column headers.
    header_row = ssdd_df.iloc[2]  # header row (index 2)
    new_columns = header_row.tolist()

    # Convert date-typed headers (weekly columns) into YYYYWW via the ISO
    # calendar. NOTE(review): the original comment said the dates should be
    # shifted +7 days before conversion, but no shift is applied here —
    # confirm which behavior is intended.
    for i in range(start_week_index + 2, len(new_columns)):
        if isinstance(new_columns[i], pd.Timestamp) or isinstance(new_columns[i], datetime):
            date_obj = new_columns[i]
            # ISO year can differ from the calendar year around New Year.
            year = date_obj.isocalendar()[0]
            week = date_obj.isocalendar()[1]
            new_columns[i] = f"{year}{week:02d}"

    filtered_data.columns = new_columns

    # Drop the original header row, which is now the first data row.
    filtered_data = filtered_data.drop(filtered_data.index[0]).reset_index(drop=True)

    # Keep only: IPN column, content-label column, and everything from the
    # column after start_week_index onward (the BOH + weekly columns).
    columns_to_keep = [ipn_index] + [pf_index] + list(range(start_week_index + 1, len(filtered_data.columns)))
    filtered_data = filtered_data.iloc[:, columns_to_keep]

    # Rename the first three columns in place (positional; assumes the
    # selection above produced them in this order).
    filtered_data.columns.values[0] = 'IPN'
    filtered_data.columns.values[1] = 'Product Family'
    filtered_data.columns.values[2] = 'BOH'

    # For "Ending OH (all source)" rows, zero every cell from the fourth
    # column onward (only their BOH value is kept).
    ending_oh_mask = filtered_data['Product Family'] == "Ending OH (all source)"
    filtered_data.loc[ending_oh_mask, filtered_data.columns[3]:] = 0

    # The content label has served its purpose; drop it before aggregating.
    filtered_data = filtered_data.drop(columns=['Product Family'])
    filtered_data = filtered_data.iloc[:, :50]
    # Merge duplicate IPNs (Supply + Ending-OH rows) by summing.
    filtered_data = filtered_data.groupby('IPN', as_index=False).sum()

    return filtered_data


def process_pega_sheet_data(ssdd_df, indexes):
    """
    Extract the supply / ending-OH rows from one Pegatron SSDD sheet and
    aggregate them per IPN.

    Args:
        ssdd_df: sheet as read by pd.read_excel with the real header already
            applied; must contain a "Content" column at position pf_index.
            NOTE: this frame's column labels are rewritten in place.
        indexes: (start_week_index, ipn_index, pf_index, supply_name,
            eoh_name) — column positions plus the exact "Content" labels of
            the supply and ending-OH rows (trailing spaces matter).

    Returns:
        pd.DataFrame: one row per IPN ('IPN' column) plus weekly columns;
        supply rows have their first week zeroed, ending-OH rows have every
        later week zeroed, then rows are summed per IPN. Capped at 60 columns.
    """
    print("Processing PEGA sheet data...")
    start_week_index, ipn_index, pf_index, supply_name, eoh_name = indexes

    # Rewrite date-like headers as YYYY + %W week number. NOTE: %W counts
    # weeks from the first Monday of the year (week 00 before it) and is NOT
    # the ISO week used by process_sheet_data's isocalendar() — the two
    # functions can disagree around year boundaries.
    new_columns = []
    for col in ssdd_df.columns:
        try:
            dt = pd.to_datetime(col, errors="raise")
            new_columns.append(dt.strftime("%Y%W"))
        except Exception:
            new_columns.append(col)  # non-date header: keep unchanged
    ssdd_df.columns = new_columns

    # Keep only the IPN column, the Content column, and the weekly columns.
    selected_cols = [ssdd_df.columns[ipn_index], ssdd_df.columns[pf_index]] + list(ssdd_df.columns[start_week_index:])
    ssdd_df = ssdd_df[selected_cols]

    # Keep only supply and ending-OH rows. copy() detaches the slice so the
    # .loc assignments below cannot raise SettingWithCopyWarning / silently
    # write to a temporary under pandas copy-on-write.
    ssdd_df = ssdd_df[ssdd_df["Content"].isin([supply_name, eoh_name])].copy()

    # First weekly column (position 2, after the IPN and Content columns).
    third_col = ssdd_df.columns[2]

    # Supply rows: zero the first week.
    ssdd_df.loc[ssdd_df["Content"] == supply_name, third_col] = 0

    # Ending-OH rows: zero every week after the first (only BOH is kept).
    cols_after_third = ssdd_df.columns[3:]
    ssdd_df.loc[ssdd_df["Content"] == eoh_name, cols_after_third] = 0

    # Drop the Content label and expose the IPN column under the name "IPN".
    ssdd_df = ssdd_df.drop(columns=["Content"])
    ssdd_df = ssdd_df.rename(columns={ssdd_df.columns[0]: "IPN"})

    # Merge duplicate IPNs by summing their weekly values.
    ssdd_df = ssdd_df.groupby('IPN', as_index=False).sum()

    # Cap at 60 columns (IPN + 59 weeks).
    ssdd_df = ssdd_df.iloc[:, :60]

    return ssdd_df


def process_consigned_pti_ssdd(data_path, inventory_path=None):
    """
    Build the combined PTI consigned-supply table from one workbook.

    Reads the NAND/DRAM/ASIC sheets, runs each through process_sheet_data,
    then merges the per-sheet results by IPN.

    Args:
        data_path: path to the PTI SSDD workbook.
        inventory_path: unused; kept for interface parity.

    Returns:
        tuple[pd.DataFrame, dict]: per-IPN summed data and an (empty)
        auxiliary dict; an empty DataFrame when no sheet could be processed.
    """
    # Sheet -> (start_week_index, ipn_index, pf_index) for process_sheet_data.
    sheet_names = {'NAND': (11, 0, 9), 'DRAM': (15, 1, 13), 'ASIC': (16, 0, 14)}
    print(data_path)

    frames = []
    for sheet_name, indexes in sheet_names.items():
        try:
            sheet_df = pd.read_excel(data_path, sheet_name=sheet_name)
            result = process_sheet_data(sheet_df, indexes)
            result['Source'] = sheet_name  # tag origin before merging
            frames.append(result)
        except Exception as e:
            # Best-effort: a broken sheet is reported and skipped.
            print(f"处理{sheet_name}时出错: {e}")
            traceback.print_exc()

    if not frames:
        # Nothing processed successfully.
        return pd.DataFrame(), {}

    combined = pd.concat(frames, ignore_index=True)
    # Sum rows sharing an IPN across sheets, then drop the origin tag.
    combined = combined.groupby('IPN', as_index=False).sum()
    if 'Source' in combined.columns:
        combined = combined.drop(columns=['Source'])
    return combined, {}


def process_consigned_pega_ssdd(data_path, inventory_path=None):
    """
    Build the combined Pegatron consigned-supply table from one workbook.

    Reads the NAND/DRAM/ASIC sheets (real header on sheet row 75), runs each
    through process_pega_sheet_data, then merges the results by IPN.

    Args:
        data_path: path to the Pegatron SSDD workbook.
        inventory_path: unused; kept for interface parity.

    Returns:
        pd.DataFrame: per-IPN summed data, or an empty DataFrame when no
        sheet could be processed.
    """
    # Sheet -> (start_week_index, ipn_index, pf_index, supply label, EOH
    # label). The labels must match the sheet text exactly — note the
    # trailing space in "Supply(CFM) " for DRAM/ASIC.
    sheet_configs = {'NAND': (10, 0, 6, "Supply(CFM)", "EndingOH"),
                     'DRAM': (17, 1, 13, "Supply(CFM) ", "Ending OH (all source)"),
                     'ASIC': (17, 0, 13, "Supply(CFM) ", "Ending OH (all source)")}

    frames = []
    for sheet_name, cfg in sheet_configs.items():
        try:
            # header=74: the first 74 rows are preamble; row 75 holds names.
            raw = pd.read_excel(data_path, sheet_name=sheet_name, header=74)
            sheet_result = process_pega_sheet_data(raw, cfg)
            sheet_result['Source'] = sheet_name  # tag origin before merging
            frames.append(sheet_result)
        except Exception as e:
            # Best-effort: a broken sheet is reported and skipped.
            print(f"处理{sheet_name}时出错: {e}")
            traceback.print_exc()

    if not frames:
        # Nothing processed successfully.
        return pd.DataFrame()

    combined = pd.concat(frames, ignore_index=True)
    # Sum rows sharing an IPN across sheets, then drop the origin tag.
    combined = combined.groupby('IPN', as_index=False).sum()
    if 'Source' in combined.columns:
        combined = combined.drop(columns=['Source'])
    return combined


def process_cs_bom_data(data_path, upside_data=None):
    """
    Process CS BOM data, optionally duplicating the rows of upside products.

    Args:
        data_path (str): path to the BOM Excel file (must contain a
            'BOM(CS)' sheet).
        upside_data (optional): mapping or DataFrame exposing an 'Scode'
            collection of product codes whose BOM rows should be duplicated
            as "(Upside)" variants. Defaults to None.

    Returns:
        pd.DataFrame: processed BOM with columns
        ['Scode', 'SPN', '所需物料数量', 'Replacement Status', 'Replacement Type'].

    NOTE(review): a CODE2 grouping (code2_data) is computed below but never
    returned, although an earlier docstring promised a (DataFrame, dict)
    tuple. Kept as-is to avoid breaking callers — confirm whether code2_data
    should be part of the return value.
    """
    # Read the BOM sheet.
    bom_data = pd.read_excel(data_path, sheet_name='BOM(CS)')

    # 1. Keep only the columns we use, tolerating missing ones.
    required_columns = ['mm_number', 'Component', 'Qty', 'Group', 'Pairing']
    existing_columns = [col for col in required_columns if col in bom_data.columns]
    bom_data = bom_data[existing_columns]

    # 2. Blank out Group values other than CODE0/CODE1/CODE2.
    if 'Group' in bom_data.columns:
        valid_groups = ['CODE0', 'CODE1', 'CODE2']
        invalid_mask = ~bom_data['Group'].isin(valid_groups)
        bom_data.loc[invalid_mask, 'Group'] = np.nan

    # 3. Add the replacement bookkeeping columns.
    bom_data['Replacement Status'] = np.nan
    bom_data['Replacement Type'] = np.nan

    # 4. Per mm_number: the first CODE1 row is the Replacement; any further
    #    CODE1 rows are Substitutes with Qty forced to 0.
    if 'Group' in bom_data.columns and 'mm_number' in bom_data.columns:
        code1_mask = bom_data['Group'] == 'CODE1'
        code1_rows = bom_data[code1_mask].copy()

        first_code1_idx = code1_rows.groupby('mm_number').head(1).index
        bom_data.loc[first_code1_idx, 'Replacement Status'] = 'Replacement'

        other_code1_idx = code1_rows.index.difference(first_code1_idx)
        bom_data.loc[other_code1_idx, 'Replacement Type'] = 'Substitute'
        bom_data.loc[other_code1_idx, 'Qty'] = 0

    # 5. Pairing == 0 (numeric, string, or padded string) also zeroes Qty.
    if 'Pairing' in bom_data.columns and 'Qty' in bom_data.columns:
        pairing_zero_mask = (
                (bom_data['Pairing'] == 0) |
                (bom_data['Pairing'] == '0') |
                (bom_data['Pairing'].astype(str).str.strip() == '0')
        )
        bom_data.loc[pairing_zero_mask, 'Qty'] = 0

    # 6. Collect CODE2 materials per product (currently unused — see the
    #    NOTE in the docstring).
    code2_data = {}
    if 'Group' in bom_data.columns and 'mm_number' in bom_data.columns:
        code2_mask = bom_data['Group'] == 'CODE2'
        code2_df = bom_data[code2_mask].copy()
        for mm_number, group_df in code2_df.groupby('mm_number'):
            # Records form is convenient for any later per-material handling.
            code2_data[mm_number] = group_df[['Component', 'Qty']].to_dict('records')

    # 7. Sort by mm_number first, Group second (whichever are present).
    sort_columns = [c for c in ['mm_number', 'Group'] if c in bom_data.columns]
    if sort_columns:
        bom_data = bom_data.sort_values(by=sort_columns)

    # 8. Reset the index and drop the helper columns.
    bom_data.reset_index(drop=True, inplace=True)
    columns_to_drop = [col for col in ['Group', 'Pairing'] if col in bom_data.columns]
    if columns_to_drop:
        bom_data = bom_data.drop(columns=columns_to_drop)

    # Rename by label rather than assigning a fixed-length name list, so a
    # sheet missing one of the optional columns does not crash on a
    # length-mismatch when setting .columns.
    bom_data = bom_data.rename(columns={
        'mm_number': 'Scode',
        'Component': 'SPN',
        'Qty': '所需物料数量',
    })

    # Optional: duplicate the BOM rows of the upside products.
    if upside_data is not None:
        # list() accepts a dict value, a pandas Series (DataFrame.get), or
        # any iterable, and sidesteps Series truthiness errors inside
        # duplicate_bom_with_upside's emptiness check.
        product_codes = list(upside_data.get('Scode', []))
        bom_data = duplicate_bom_with_upside(product_codes, bom_data)

    return bom_data


def calculate_material_ratios(inventory, bom_groups, round_digits=2):
    """
    Compute each material's consumption percentage across BOM groups.

    Each group's capacity is the bottleneck min(stock / ratio) over its
    materials; a group's percentage is its share of the total capacity. When
    the same material appears in several groups, its per-group percentages
    are summed. If no group has any capacity (including the case where every
    stock value is <= 0), the first group gets 100% and the rest 0%.

    Args:
        inventory: dict mapping material -> stock quantity (negatives count
            as 0).
        bom_groups: list of group dicts; each lists its materials either
            under keys 'A'..'F' or under a 'materials' list, with an optional
            'ratio' (scalar or per-material sequence, default 1 each).
        round_digits: decimal places for the returned percentages (default 2).

    Returns:
        dict: {material: pct, ...} — percentages >= 0, accumulated across
        groups for shared materials.

    Note:
        The previous implementation short-circuited when all stock was <= 0
        via a branch that only understood the 'A'..'F' format and overwrote
        (rather than accumulated) shared materials. That branch was redundant
        — all capacities compute to 0 in that case — and inconsistent, so it
        was removed; the zero-total branch below now covers it uniformly.
    """
    capacities = []
    group_materials = []

    for group in bom_groups:
        # Accept either an explicit 'materials' list or 'A'..'F' keys.
        if 'materials' in group:
            materials = list(group['materials'])
        else:
            materials = [group[k] for k in ['A', 'B', 'C', 'D', 'E', 'F'] if k in group]

        # 'ratio' may be a scalar (applied to every material) or a sequence.
        ratios = group.get('ratio', (1,) * len(materials))
        if isinstance(ratios, (int, float)):
            ratios = [float(ratios)] * len(materials)
        ratios = list(ratios)

        # Group capacity is limited by its scarcest material.
        caps = []
        for m, r in zip(materials, ratios):
            stock = max(float(inventory.get(m, 0.0)), 0.0)  # negative stock counts as 0
            caps.append(stock / r if r else 0.0)  # ratio 0 -> treat as no capacity
        capacity = min(caps) if caps else 0.0

        capacities.append(capacity)
        group_materials.append({'materials': materials, 'capacity': capacity})

    total_capacity = sum(capacities)
    result = {}

    # No usable capacity anywhere (covers all-stock-<=-0): first group 100%,
    # every other group 0%, still accumulating over shared materials.
    if total_capacity == 0:
        for i, gm in enumerate(group_materials):
            pct = 100.0 if i == 0 else 0.0
            for m in gm['materials']:
                result[m] = round(result.get(m, 0.0) + pct, round_digits)
        return result

    for gm, cap in zip(group_materials, capacities):
        pct = (cap / total_capacity) * 100.0
        pct_rounded = max(round(pct, round_digits), 0.0)  # clamp at 0
        for m in gm['materials']:
            result[m] = round(result.get(m, 0.0) + pct_rounded, round_digits)

    # Final floor at 0 as a safety net.
    for m in result:
        result[m] = max(result[m], 0.0)

    return result


def duplicate_bom_with_upside(product_codes: list, bom_df: pd.DataFrame) -> pd.DataFrame:
    """
    Append "(Upside)" copies of the rows for the given Scodes to the BOM.

    Args:
        product_codes (list): Scodes whose rows should be duplicated; every
            code must already exist in bom_df.
        bom_df (pd.DataFrame): original BOM table (must have an 'Scode'
            column). Not modified.

    Returns:
        pd.DataFrame: a new table — the original rows followed by the
        duplicated rows, whose Scode has '(Upside)' appended.

    Raises:
        ValueError: if product_codes is empty or contains an unknown Scode.
    """
    # Reject an empty request outright.
    if not product_codes:
        raise ValueError("S-code cannot be empty")

    # Validate every requested code against the codes actually present.
    known = bom_df['Scode'].unique()
    unknown = [code for code in product_codes if code not in known]
    if unknown:
        raise ValueError(f"The following S-codes do not exist: {unknown}")

    # Copy the matching rows so the rename below cannot touch the original.
    upside_rows = bom_df[bom_df['Scode'].isin(product_codes)].copy()
    upside_rows.loc[:, 'Scode'] = upside_rows['Scode'] + '(Upside)'

    # Stack the duplicates under the original table.
    return pd.concat([bom_df, upside_rows], ignore_index=True)


if __name__ == '__main__':
    # Ad-hoc driver: input workbooks, relative to the script's directory.
    pti_ssdd_path = r"../data/1018/Solidigm_PTI CS Shortage Report_2025_WW39_0924.xlsx"
    pega_ssdd_path = r"../data/1018/WW39 Solidigm_Pegatron CS Shortage Report_2025 .xlsx"
    bom_path = r"../data/1018/BOM combine review BB.xlsx"
    # Alternate entry points, kept for manual switching:
    # result = process_consigned_pti_ssdd(pti_ssdd_path)
    result = process_consigned_pega_ssdd(pega_ssdd_path)
    # result = process_cs_bom_data(bom_path)

    # result.to_excel("result.xlsx", index=False)
    # print(result)
    # Sample inventory for calculate_material_ratios; negative stock is
    # treated as 0 by that function.
    inventory = {
        'K84041-001': -100, 'K84040-001': -200,
        'AA001559F': 80, 'J85976-002': 160,
        'AA001655A': 50, 'K30207-001': 60
    }

    # Per-product substitution groups: each inner dict is one material group
    # ('A'/'B' members plus their consumption 'ratio').
    bom_groups = {
        "AA001928X": [
            {'A': 'K84041-001', 'B': 'K84040-001', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'J85976-002', 'ratio': (1, 2)},
        ],
        "AA001928Y": [
            {'A': 'K84041-001', 'B': 'K84040-001', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'AA001655A', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'K30207-001', 'ratio': (1, 2)},
        ],
        "AA0019292": [  # same substitute structure as AA001928Y
            {'A': 'K84041-001', 'B': 'K84040-001', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'AA001655A', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'K30207-001', 'ratio': (1, 2)},
        ],
        "AA0019293": [
            {'A': 'K84041-001', 'B': 'K84040-001', 'ratio': (1, 2)},
            {'A': 'AA001559F', 'B': 'J85976-002', 'ratio': (1, 2)},
        ],
    }

    # res = calculate_material_ratios(inventory, bom_groups['AA001928Y'])
    # print(res)
