import logging


def merge_cache_pool(df, *, dfType):
    """
    Cache pool for channels 991/990.

    Uses step-wise filtering and multiple merges so that the different
    ad-record types (adType '18' and adType '6') belonging to the same
    order number end up associated on a single row.

    :param df: DataFrame with at least the columns 'Timestamp',
        'adOrderNo' and 'adType' (adType values are strings); for
        dfType '1' it must also carry 'NumId', 'adSum', 'adId',
        'adParam', 'img' and 'sp'; for dfType '2' it must carry
        'adId', 'adParam' and 'sp'.
    :param dfType: '1' — regular ads, '2' — video ads; selects which
        columns are carried over from the adType-18 and adType-6 rows.
    :return: DataFrame with one row per adOrderNo, combining the
        per-adType occurrence counts, the adType-18 attributes and the
        adType-6 attributes; the Timestamp is the adType-18 row's.
    :raises ValueError: if dfType is not '1' or '2'.
    """
    if dfType == '1':
        columns_18 = ['adOrderNo', 'NumId', 'adSum', 'adId']
        columns_6 = ['adOrderNo', 'adParam', 'img', 'sp']
    elif dfType == '2':
        columns_18 = ['adOrderNo', 'adId']
        columns_6 = ['adOrderNo', 'adParam', 'sp']
    else:
        # Fail fast with a clear message instead of hitting an opaque
        # UnboundLocalError on columns_18/columns_6 further down.
        raise ValueError(f"dfType must be '1' or '2', got {dfType!r}")

    # Group by adOrderNo and count how often each adType occurs; orders
    # lacking a given adType get the fill value.
    # NOTE(review): fill_value is the *string* '0', not the integer 0, so
    # count columns can hold mixed int/str values — confirm downstream
    # consumers expect string '0' before changing this.
    grouped_counts = df.groupby('adOrderNo')['adType'].value_counts().unstack(fill_value='0')

    # Attach each order's counts to the Timestamp of its adType-18 row,
    # so the final record carries the adType-18 timestamp.
    # (merge on='adOrderNo' matches grouped_counts' index level name.)
    time_df = df[['Timestamp', 'adOrderNo']][df['adType'] == '18']
    grouped_counts = time_df.merge(grouped_counts, on='adOrderNo', how='left')

    # Inner-merge keeps only orders that actually have an adType-18 row
    # and pulls in that row's attribute columns.
    tmp_df = df[df['adType'] == '18']
    grouped_counts_18 = grouped_counts.merge(tmp_df[columns_18],
                                             on='adOrderNo',
                                             how='inner')

    # Pull the adParam (and related) columns from the adType-6 rows;
    # left merge keeps orders that have no adType-6 row.
    adParam_tmp_df = df[df['adType'] == '6']
    grouped_counts_6 = grouped_counts.merge(adParam_tmp_df[columns_6],
                                            on='adOrderNo',
                                            how='left')

    # Combine the adType-18 and adType-6 views into a single record.
    grouped_counts = grouped_counts_18.merge(grouped_counts_6[columns_6],
                                             on='adOrderNo',
                                             how='left')

    # Deduplicate on adOrderNo, keeping the first record.
    grouped_counts = grouped_counts.drop_duplicates(subset=['adOrderNo'], keep='first')
    return grouped_counts
