import subprocess
import time
import logging
import numpy as np
import requests
import pandas as pd
import re
from urllib.parse import unquote
import atexit
import warnings

# Globally silence FutureWarning output (placed before the project imports
# below so warnings raised during those imports are suppressed too).
warnings.filterwarnings("ignore", category=FutureWarning)
from Determining_ad.src.Bidding.cache_pool_bidding import merge_cache_pool
from Determining_ad.src.Bidding.self_pool_bidding import merge_self_pool
from Determining_ad.src.Tools.df_column import ad_num, ad_sum, organize_df, organize_vedio_df
from Determining_ad.src.Tools.ad_rule import check_pool_ad_events


class Others_ad_log_start:
    """Parse SDK ad logs and build per-ad-slot event summary tables.

    The raw log arrives as a DataFrame (``all_df``) with at least
    ``Level``/``Timestamp``/``Message`` columns; :meth:`get_ad_df` extracts the
    ad events from it, and the remaining ``get_*_ad`` methods slice that event
    frame per ad-slot family and delegate aggregation to the project helpers
    (``ad_sum``/``merge_cache_pool``/``organize_df`` …).

    NOTE(review): despite the ``get_`` prefix, ``get_all_df``, ``get_entity``
    and ``get_phone_brand`` are *setters*; the names are kept unchanged for
    backward compatibility with existing callers.
    """

    def __init__(self, entity=None, phone_brand=None, all_df=None, ad_df=None, packname=None, title="未加密"):
        """
        Constructor.

        :param entity: business entity the log belongs to
        :param phone_brand: brand of the phone that produced the log
        :param all_df: full log DataFrame (Level/Timestamp/Message columns)
        :param ad_df: parsed ad-event DataFrame (see :meth:`get_ad_df`)
        :param packname: package name of the app under test
        :param title: label used in log output — encrypted ("加密") or plain ("未加密")
        """
        self.entity = entity
        self.phone_brand = phone_brand
        self.all_df = all_df
        self.ad_df = ad_df
        self.packname = packname
        self.title = title

    def get_all_df(self, all_df):
        # Setter: store the full log DataFrame.
        self.all_df = all_df

    def get_ad_df(self, all_df):
        """
        Extract ad events from the raw log and cache them on ``self.ad_df``.

        :param all_df: full log DataFrame with Level/Timestamp/Message columns
        :return: DataFrame with columns
                 Timestamp/adId/State/adParam/adOrderNo/adType/sp/img
        """
        # Check for error-level rows first.
        err_row = all_df.query('Level == "E"')
        if err_row.empty:
            logging.info('日志没有报错')
        else:
            # NOTE(review): collected but never logged — kept for debugging.
            err_row_message = err_row['Message']
        # One ad event per matching log line; compiled once, used per row.
        ad_pattern = re.compile(
            r'adId\[(\d+)\],\s+state\[(.*?)\],\s+adParam\[(.*?)\],?\s+adOrderNo\[(.*?)\],\s+adType\[(\d+)\],\s+sp\[(.*?)\],\s+img\[(.*?)\]'
        )
        data_list = []
        index_list = []
        # Scan every log line for ad events, keeping the original row index.
        for index, row in all_df.iterrows():
            matches = ad_pattern.findall(row["Message"])
            if matches:
                timestamp = row["Timestamp"]
                (adId, chinese_adType, adParam, adOrderNo, adType, sp, img) = matches[0]
                data_list.append([timestamp, adId, chinese_adType, adParam, adOrderNo, adType, sp, img])
                index_list.append(index)
        columns = ['Timestamp', 'adId', 'State', 'adParam', 'adOrderNo', 'adType', 'sp', 'img']
        df = pd.DataFrame(data_list, columns=columns, index=index_list)
        # Add the NumId column. BUGFIX: the ad_num() result used to be assigned
        # to self.ad_df and then immediately overwritten, discarding NumId
        # (assumes ad_num returns the augmented frame — TODO confirm).
        df = ad_num(df)
        # Exclude adId 400: its 28 and 18 events are both booked on slot 400.
        self.ad_df = df[df['adId'] != '400']
        return self.ad_df

    def get_entity(self, entity):
        """
        Setter: store the business entity.
        :param entity:
        :return:
        """
        self.entity = entity

    def get_phone_brand(self, phone_brand):
        # Setter: store the phone brand.
        self.phone_brand = phone_brand

    def get_pool_ad(self, ad_df):
        """
        Build the event table for cache-pool ad slots (adId starting with "99").

        :param ad_df: parsed ad-event DataFrame (see :meth:`get_ad_df`)
        :return: one row per (adOrderNo, adParam) with per-adType counts and a
                 'check' verdict column
        """
        # Global side effect: make to_string() print every row in log output.
        pd.set_option('display.max_rows', None)
        # Boolean indexing: rows whose adId starts with '99' belong to the pool.
        # https://geek-docs.com/pandas/pandas-dataframe/boolean-indexing-in-pandas.html
        pool_df = ad_df[ad_df['adId'].astype(str).str.startswith('99')]
        # adType 28 rows carry the reference Timestamp for each order.
        time_df = pool_df[pool_df['adType'] == '28'][['Timestamp', 'adOrderNo', 'adParam', 'img', 'sp']]

        # Keep adType 3/4 rows whose adOrderNo AND adParam both occur in time_df.
        mask = (
                (pool_df['adType'].isin(['3', '4'])) &
                (pool_df['adOrderNo'].isin(time_df['adOrderNo'])) &
                (pool_df['adParam'].isin(time_df['adParam']))
        )
        pool_filtered = pool_df[mask]

        # Deduplicate on (adOrderNo, adParam), keeping the first occurrence.
        pool_unique = pool_filtered.drop_duplicates(subset=['adOrderNo', 'adParam'], keep='first')

        # Merge the matched img/sp back onto time_df (left join keeps all rows).
        time_df = time_df.merge(
            pool_unique[['adOrderNo', 'adParam', 'img', 'sp']],
            on=['adOrderNo', 'adParam'],
            how='left',
            suffixes=('_original', '_matched')  # distinguish original vs merged columns
        )

        # Promote the matched columns to the canonical names.
        time_df.rename(columns={'img_matched': 'img'}, inplace=True)
        time_df.rename(columns={'sp_matched': 'sp'}, inplace=True)

        # Count adType occurrences per (adOrderNo, adParam, adId); missing -> 0.
        grouped_counts = pool_df.groupby(['adOrderNo', 'adParam', 'adId']).adType.value_counts().unstack(fill_value=0)
        # Bring img back alongside the counts.
        grouped_counts = grouped_counts.merge(
            pool_df[['adOrderNo', 'adParam', 'adId', 'img']],
            on=['adOrderNo', 'adParam'],
            how='left')

        # Attach the timestamps, then deduplicate the merge fan-out.
        grouped_counts = time_df.merge(grouped_counts, on=['adOrderNo', 'adParam', 'img'], how='left')
        grouped_counts = grouped_counts.drop_duplicates(subset=['adOrderNo', 'adParam'])

        # Ensure the event-count columns exist even when an adType never fired.
        column_names = ['28', '32', '4', '3']
        for column_name in column_names:
            if column_name not in grouped_counts:
                grouped_counts[column_name] = 0

        # Row-wise rule check (see check_pool_ad_events).
        grouped_counts['check'] = grouped_counts.apply(check_pool_ad_events, axis=1)

        # Move adId and img to the end, drop the leftover *_original columns.
        column_data = grouped_counts.pop("adId")
        grouped_counts.insert(len(grouped_counts.columns), "adId", column_data)
        column_data = grouped_counts.pop("img")
        grouped_counts.insert(len(grouped_counts.columns), "img", column_data)
        grouped_counts.drop("img_original", axis=1, inplace=True)
        grouped_counts.drop("sp_original", axis=1, inplace=True)
        if grouped_counts.empty:
            logging.warning(f'{self.title}缓存池事件表为空')
        else:
            logging.info(f'{self.title}缓存池的事件表: \n{grouped_counts.to_string()}')
        return grouped_counts

    def get_main_ad(self, ad_df):
        """
        Build the event table for the main ad slots (adId starting with "4").

        :param ad_df: parsed ad-event DataFrame
        :return: organized event table; empty when no main-slot events exist
        """
        # Boolean Series selecting the main ad slots.
        adId_ser = ad_df['adId'].astype(str).str.startswith('4')
        first_df = ad_df[adId_ser]
        # Compute the adSum column.
        mask_df = ad_sum(first_df)

        # Merge per-order data, then organize and sort the columns.
        grouped_counts = organize_df(merge_cache_pool(mask_df, dfType='1'))

        if grouped_counts.empty:
            logging.warning(f'{self.title}主广告事件为空')
        else:
            logging.info(f'{self.title}主广告事件表：\n{grouped_counts.to_string()}')
        return grouped_counts

    def get_second_ad(self, ad_df):
        """
        Build the event table for secondary ad slots (adId starting with "11").

        BUGFIX: the original version tested ``mask.empty`` on a boolean Series
        (only true for an empty input) and its prefix-remap loop clobbered the
        selection variable every iteration, so the fallback path could never
        work. The fallback now selects the 4-digit "2xxx" ids once and remaps
        each prefix to its canonical 11xx id on that frame.

        :param ad_df: parsed ad-event DataFrame
        :return: organized event table; empty when no secondary-slot events exist
        """
        second_df = ad_df[ad_df['adId'].astype(str).str.startswith('11')]
        if second_df.empty:
            # Fallback: some logs record secondary slots as 4-digit ids
            # starting with '2'; remap the prefixes to the canonical 11xx ids.
            second_df = ad_df[
                (ad_df['adId'].astype(str).str.startswith('2')) & (ad_df['adId'].astype(str).str.len() == 4)
            ].copy()
            prefix_mapping = {
                "20": "1112",
                "21": "1113",
                "22": "1114",
                "23": "1115",
            }
            for prefix, new_value in prefix_mapping.items():
                # Select rows with the current prefix and rewrite their adId.
                sel = second_df["adId"].astype(str).str.startswith(prefix, na=False)
                second_df.loc[sel, "adId"] = new_value

        # Compute the adSum column.
        mask_df = ad_sum(second_df)

        # Merge per-order data, then organize and sort the columns.
        grouped_counts = organize_df(merge_cache_pool(mask_df, dfType='1'))

        if grouped_counts.empty:
            logging.warning(f'{self.title}子广告位的事件表为空')
        else:
            logging.info(f'{self.title}子广告事件：\n{grouped_counts.to_string()}')
        return grouped_counts

    def get_quit_ad(self, ad_df):
        """
        Build the event table for the main quit-dialog ad slots (3-digit adId
        starting with "23").

        :param ad_df: parsed ad-event DataFrame
        :return: organized event table; empty when no such events exist
        """
        adId_ser = ad_df['adId'].astype(str).str.startswith('23')

        # Restrict to 3-digit ids ("23x") and compute the adSum column.
        mask = adId_ser & (ad_df['adId'].astype(str).str.len() == 3)
        quit_df = ad_df[mask]
        mask_df = ad_sum(quit_df)

        # Merge per-order data, then organize and sort the columns.
        grouped_counts = organize_df(merge_cache_pool(mask_df, dfType='1'))

        if grouped_counts.empty:
            logging.warning(f'{self.title}主退出弹窗广告位事件表为空')
        else:
            logging.info(f'{self.title}主退出弹窗事件：\n{grouped_counts.to_string()}')
        return grouped_counts

    def get_second_quit_ad(self, ad_df):
        """
        Build the event table for the secondary quit-dialog ad slots (3-digit
        adId starting with "24" or "25", e.g. 241/251).

        :param ad_df: parsed ad-event DataFrame
        :return: organized event table; empty when no such events exist
        """
        # Boolean indexing on either prefix.
        # https://geek-docs.com/pandas/pandas-dataframe/boolean-indexing-in-pandas.html
        adId_ser = (ad_df['adId'].astype(str).str.startswith('24')) | (ad_df['adId'].astype(str).str.startswith('25'))

        # Restrict to 3-digit ids and compute the adSum column.
        mask = adId_ser & (ad_df['adId'].astype(str).str.len() == 3)
        quit_df = ad_df[mask]
        mask_df = ad_sum(quit_df)

        # Merge per-order data, then organize and sort the columns.
        grouped_counts = organize_df(merge_cache_pool(mask_df, dfType='1'))

        if grouped_counts.empty:
            logging.warning(f'{self.title}子退出弹窗广告位事件表为空')
        else:
            logging.info(f'{self.title}子退出弹窗事件：\n{grouped_counts.to_string()}')
        return grouped_counts

    def get_video_ad(self, ad_df):
        """
        Build the event table for cache-pool rewarded videos (adId starting
        with "5", excluding the self-pool id "50").

        :param ad_df: parsed ad-event DataFrame
        :return: organized video event table
        """
        mask_df = ad_df[ad_df['adId'].astype(str).str.startswith('5') & (ad_df['adId'] != '50')]

        # Merge per-order data, then organize and sort the columns.
        grouped_counts = organize_vedio_df(merge_cache_pool(mask_df, dfType='2'))

        if grouped_counts.empty:
            logging.warning(f'{self.title}激励视频事件表为空')
        else:
            logging.info(f'{self.title}激励视频的事件表: \n{grouped_counts.to_string()}')
        return grouped_counts

    def get_video50_ad(self, ad_df):
        """
        Build the event table for the self-pool rewarded video slot (adId "50").

        :param ad_df: parsed ad-event DataFrame
        :return: organized video event table
        """
        # Only the rows booked on slot 50.
        mask_df = ad_df[ad_df['adId'] == '50']

        grouped_counts = organize_vedio_df(merge_self_pool(mask_df))

        if grouped_counts.empty:
            logging.warning(f'{self.title}50激励视频事件表为空')
        else:
            logging.info(f'{self.title}50激励视频的事件表: \n{grouped_counts.to_string()}')
        return grouped_counts


# Module-level shared instance (presumably imported by other modules — verify callers).
others_ad_log_start = Others_ad_log_start()
