from pathlib import Path
import pandas as pd
from datetime import datetime
import os
import numpy as np


class SmsConfigConstant:
    """
    SMS feature configuration constants.
    """
    # Look-back window lengths in days; the string 'all' means "no lower
    # time bound" (every message up to the application time).
    INTERVAL_LIST = [1, 3, 7, 15, 30, 60, 90, 180, 360, 'all']
    # Intra-day periods as (start_hour, end_hour, name); hour bounds are
    # inclusive on both ends (compared with Series.between).
    TIME_PERIODS = [(0, 5, 'early_morning'), (6, 10, 'morning'), (11, 13, 'noon'), (14, 17, 'afternoon'),
                    (18, 23, 'night')]
    # WEEK_TYPES = ['weekday', 'weekend']



class SmsOverdueRateV1:
    """
    Overdue-rate features for SMS senders and message words.

    Every extractor is a static method with the same signature:

    Args:
        df: SMS dataframe. Expected columns: the timestamp column named by
            ``time_col``; 'sender'; 'words' (list of tokens per message);
            'hour' (message hour of day); 'time_day' (message day, a
            date-like value whose differences expose ``.days``).
            NOTE(review): column expectations inferred from usage below —
            confirm against the caller.
        time_col: name of the message timestamp column in ``df``.
        config_all: config dataframe with columns 'risk_level' plus
            'sender' and/or 'word' mapping senders/words to risk levels.
        apply_time: application time; windows are anchored at this time.

    Sentinel conventions (kept from the original implementation):
        -999 : no input data at all (``df`` empty) / feature undefined
        -99  : ratio whose denominator is zero
        0    : window has rows but none match the configured sender/word set
    """

    # Statistic suffixes shared by several feature families.
    _STATS = ('max', 'min', 'mean', 'std')

    @staticmethod
    def _window_data(df, time_col, apply_time, time_window):
        """Rows of ``df`` inside the look-back window of ``time_window``
        days ending at ``apply_time`` ('all' = no lower bound)."""
        if time_window == 'all':
            return df[df[time_col] <= apply_time]
        delta = pd.Timedelta(days=time_window)
        return df[(df[time_col] >= apply_time - delta) & (df[time_col] <= apply_time)]

    @staticmethod
    def _continuous_day_stats(day_values):
        """(max, min, mean, std) of the lengths of runs of consecutive days
        in ``day_values`` (ordered, de-duplicated day sequence).

        A single day degenerates to (1, 1, 1.0, 0.0), matching the old
        special case for ``nunique() == 1``.
        """
        runs = []
        run = 1
        for i in range(1, len(day_values)):
            if (day_values[i] - day_values[i - 1]).days == 1:
                run += 1
            else:
                runs.append(run)
                run = 1
        runs.append(run)
        return max(runs), min(runs), np.mean(runs), np.std(runs)

    @staticmethod
    def extract_sender_rlevel_cnt_features(df, time_col, config_all, apply_time):
        """
        Count features for configured senders, per risk level and window:
        distinct-sender count, its ratio to the window's message count,
        max/min/mean/std of per-sender message counts, and the most
        frequent configured sender (top1).

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            senders = set(config_all[config_all['risk_level'] == risk_level]['sender'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                prefix = f'sender_cnt_rlevel{risk_level}_d{time_window}'
                if df.empty:
                    feature_dict[prefix] = -999
                    feature_dict[f'sender_ratio_rlevel{risk_level}_d{time_window}'] = -999
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = -999
                    feature_dict[f'{prefix}_top1'] = ''
                    continue
                time_data = SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                # Distinct configured senders seen in the window.
                sender_count_nodup = len(set(time_data['sender']) & senders)
                feature_dict[prefix] = sender_count_nodup
                if len(time_data) == 0:
                    # Empty window -> ratio undefined.
                    feature_dict[f'sender_ratio_rlevel{risk_level}_d{time_window}'] = -99
                else:
                    feature_dict[f'sender_ratio_rlevel{risk_level}_d{time_window}'] = \
                        sender_count_nodup / len(time_data)
                # Messages sent by configured senders only (vectorized isin
                # replaces the per-row lambda).
                filter_sender = time_data[time_data['sender'].isin(senders)]
                if filter_sender.empty:
                    feature_dict[f'{prefix}_top1'] = ''
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = 0
                else:
                    sender_count = filter_sender.groupby('sender').size().sort_values(ascending=False)
                    # Stats over per-sender message counts; std is NaN for a
                    # single sender (pandas ddof=1), as before.
                    feature_dict[f'{prefix}_max'] = sender_count.max()
                    feature_dict[f'{prefix}_min'] = sender_count.min()
                    feature_dict[f'{prefix}_mean'] = sender_count.mean()
                    feature_dict[f'{prefix}_std'] = sender_count.std()
                    feature_dict[f'{prefix}_top1'] = str(sender_count.index[0])
        return feature_dict

    @staticmethod
    def extract_sender_rlevel_time_features(df, time_col, config_all, apply_time):
        """
        Per risk level: max/min/mean/std (in whole days) of the gap between
        apply_time and each message from a configured sender.

        Fix vs. the previous version: the sender filter is evaluated once
        per risk level (was four times), and an empty match — or an
        undefined std for a single matching message — now yields the -999
        sentinel instead of leaking NaN/NaT into the feature values.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            keys = {stat: f'sender_time_diff_rlevel{risk_level}_{stat}'
                    for stat in SmsOverdueRateV1._STATS}
            if df.empty:
                for key in keys.values():
                    feature_dict[key] = -999
                continue
            senders = set(config_all[config_all['risk_level'] == risk_level]['sender'])
            # Timedelta series of apply_time minus each matching message time
            # (messages after apply_time, if present, give negative gaps, as
            # in the original implementation).
            diffs = apply_time - df[df['sender'].isin(senders)][time_col]
            stats = {'max': diffs.max(), 'min': diffs.min(),
                     'mean': diffs.mean(), 'std': diffs.std()}
            for stat, value in stats.items():
                # NaT (no match, or single-element std) -> sentinel.
                feature_dict[keys[stat]] = value.days if pd.notna(value) else -999
        return feature_dict

    @staticmethod
    def extract_sender_rlevel_continuous_day_features(df, time_col, config_all, apply_time):
        """
        Per risk level and window: max/min/mean/std of the lengths of runs
        of consecutive days on which a configured sender appears.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            senders = set(config_all[config_all['risk_level'] == risk_level]['sender'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                prefix = f'sender_continuous_rlevel{risk_level}_d{time_window}'
                if df.empty:
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = -999
                    continue
                time_data = SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                # Ascending day order so unique() yields days in order.
                time_data = time_data.sort_values(by='time_day', ascending=True).reset_index(drop=True)
                filtered_data = time_data[time_data['sender'].isin(senders)]
                if filtered_data.empty:
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = 0
                else:
                    run_max, run_min, run_mean, run_std = \
                        SmsOverdueRateV1._continuous_day_stats(filtered_data['time_day'].unique())
                    feature_dict[f'{prefix}_max'] = run_max
                    feature_dict[f'{prefix}_min'] = run_min
                    feature_dict[f'{prefix}_mean'] = run_mean
                    feature_dict[f'{prefix}_std'] = run_std
        return feature_dict

    @staticmethod
    def extract_sender_rlevel_shift_diff_features(df, time_col, config_all, apply_time):
        """
        Per risk level and window w: difference and ratio of the number of
        distinct configured senders between the current window
        (apply_time - w, apply_time] and the previous window
        [apply_time - 2w, apply_time - w].

        Fix vs. the previous version: for time_window == 'all' (no previous
        window exists) the keys are now always emitted with the -999
        sentinel; previously they were silently omitted whenever df was
        non-empty, so the feature schema depended on the input.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            senders = set(config_all[config_all['risk_level'] == risk_level]['sender'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                diff_key = f'sender_shift_diff_rlevel{risk_level}_d{time_window}'
                ratio_key = f'sender_shift_ratio_rlevel{risk_level}_d{time_window}'
                if df.empty or time_window == 'all':
                    feature_dict[diff_key] = -999
                    feature_dict[ratio_key] = -999
                    continue
                delta = pd.Timedelta(days=time_window)
                time_data = df[(df[time_col] >= apply_time - 2 * delta) & (df[time_col] <= apply_time)]
                # Current window is strictly after apply_time - w; the
                # previous window keeps the original inclusive `between`.
                now_window_num = len(
                    set(time_data[time_data[time_col] > apply_time - delta]['sender']) & senders)
                last_mask = time_data[time_col].between(apply_time - 2 * delta, apply_time - delta)
                last_window_num = len(set(time_data[last_mask]['sender']) & senders)
                feature_dict[diff_key] = now_window_num - last_window_num
                if last_window_num == 0:
                    feature_dict[ratio_key] = -99
                else:
                    feature_dict[ratio_key] = now_window_num / last_window_num
        return feature_dict

    @staticmethod
    def extract_sender_rlevel_time_period_features(df, time_col, config_all, apply_time):
        """
        Distinct configured-sender count and its ratio to the period's
        message count, per risk level, window, and intra-day time period.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            senders = set(config_all[config_all['risk_level'] == risk_level]['sender'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                # Slice once per window; skip slicing entirely for empty input
                # (the old code indexed df[time_col] even when df was empty).
                time_data = None if df.empty else \
                    SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                for hour_lo, hour_hi, period_name in SmsConfigConstant.TIME_PERIODS:
                    cnt_key = f'sender_cnt_rlevel{risk_level}_{period_name}_d{time_window}'
                    ratio_key = f'sender_ratio_rlevel{risk_level}_{period_name}_d{time_window}'
                    if df.empty:
                        feature_dict[cnt_key] = -999
                        feature_dict[ratio_key] = -999
                        continue
                    time_data_period = time_data[time_data['hour'].between(hour_lo, hour_hi)]
                    cnt = len(set(time_data_period['sender']) & senders)
                    feature_dict[cnt_key] = cnt
                    if len(time_data_period) == 0:
                        feature_dict[ratio_key] = -99
                    else:
                        feature_dict[ratio_key] = cnt / len(time_data_period)
        return feature_dict

    @staticmethod
    def extract_word_rlevel_cnt_features(df, time_col, config_all, apply_time):
        """
        Count features for configured words, per risk level and window:
        total distinct-configured-word hits per message (summed), ratio of
        hits to the window's total token count, and max/min/mean/std of
        per-token frequencies over messages containing at least one hit.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            words = set(config_all[config_all['risk_level'] == risk_level]['word'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                prefix = f'word_cnt_rlevel{risk_level}_d{time_window}'
                if df.empty:
                    feature_dict[prefix] = -999
                    feature_dict[f'word_ratio_rlevel{risk_level}_d{time_window}'] = -999
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = -999
                    continue
                time_data = SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                # Each row's 'words' is a token list; count distinct
                # configured words per message, summed over the window.
                word_count_nodup = sum(len(words & set(tokens)) for tokens in time_data['words'])
                feature_dict[prefix] = word_count_nodup
                # Total token count, computed once (was computed twice).
                total_tokens = sum(len(tokens) for tokens in time_data['words'])
                if total_tokens == 0:
                    feature_dict[f'word_ratio_rlevel{risk_level}_d{time_window}'] = -99
                else:
                    feature_dict[f'word_ratio_rlevel{risk_level}_d{time_window}'] = \
                        word_count_nodup / total_tokens
                # Messages containing at least one configured word.
                filtered_data = time_data[time_data['words'].apply(lambda x: len(set(x) & words) > 0)]
                if filtered_data.empty:
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = 0
                else:
                    # Per-token frequency across the hit messages; reuses the
                    # filter above instead of recomputing it.
                    word_count = filtered_data.explode('words').groupby('words').size() \
                        .sort_values(ascending=False)
                    feature_dict[f'{prefix}_max'] = word_count.max()
                    feature_dict[f'{prefix}_min'] = word_count.min()
                    feature_dict[f'{prefix}_mean'] = word_count.mean()
                    feature_dict[f'{prefix}_std'] = word_count.std()
        return feature_dict

    @staticmethod
    def extract_word_rlevel_time_features(df, time_col, config_all, apply_time):
        """
        Per risk level: max/min/mean/std (in whole days) of the gap between
        apply_time and each message containing a configured word.

        Fix vs. the previous version: the word filter is evaluated once per
        risk level (was four times), and an empty match — or an undefined
        std for a single matching message — now yields the -999 sentinel
        instead of leaking NaN/NaT into the feature values.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            keys = {stat: f'word_time_diff_rlevel{risk_level}_{stat}'
                    for stat in SmsOverdueRateV1._STATS}
            if df.empty:
                for key in keys.values():
                    feature_dict[key] = -999
                continue
            words = set(config_all[config_all['risk_level'] == risk_level]['word'])
            hit_mask = df['words'].apply(lambda x: len(set(x) & words) > 0)
            diffs = apply_time - df[hit_mask][time_col]
            stats = {'max': diffs.max(), 'min': diffs.min(),
                     'mean': diffs.mean(), 'std': diffs.std()}
            for stat, value in stats.items():
                # NaT (no match, or single-element std) -> sentinel.
                feature_dict[keys[stat]] = value.days if pd.notna(value) else -999
        return feature_dict

    @staticmethod
    def extract_word_rlevel_continuous_day_features(df, time_col, config_all, apply_time):
        """
        Per risk level and window: max/min/mean/std of the lengths of runs
        of consecutive days on which a configured word appears.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            words = set(config_all[config_all['risk_level'] == risk_level]['word'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                prefix = f'word_continuous_rlevel{risk_level}_d{time_window}'
                if df.empty:
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = -999
                    continue
                time_data = SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                # Ascending day order so unique() yields days in order.
                time_data = time_data.sort_values(by='time_day', ascending=True).reset_index(drop=True)
                filtered_data = time_data[time_data['words'].apply(lambda x: len(set(x) & words) > 0)]
                if filtered_data.empty:
                    for stat in SmsOverdueRateV1._STATS:
                        feature_dict[f'{prefix}_{stat}'] = 0
                else:
                    run_max, run_min, run_mean, run_std = \
                        SmsOverdueRateV1._continuous_day_stats(filtered_data['time_day'].unique())
                    feature_dict[f'{prefix}_max'] = run_max
                    feature_dict[f'{prefix}_min'] = run_min
                    feature_dict[f'{prefix}_mean'] = run_mean
                    feature_dict[f'{prefix}_std'] = run_std
        return feature_dict

    @staticmethod
    def extract_word_rlevel_shift_diff_features(df, time_col, config_all, apply_time):
        """
        Per risk level and window w: difference and ratio of the
        configured-word hit counts between the current window
        (apply_time - w, apply_time] and the previous window
        [apply_time - 2w, apply_time - w].

        Fixes vs. the previous version: the per-window hit counts are
        computed once each (they were recomputed up to three times), and
        for time_window == 'all' the keys are now always emitted with -999
        instead of being silently omitted whenever df was non-empty.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            words = set(config_all[config_all['risk_level'] == risk_level]['word'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                diff_key = f'word_shift_diff_rlevel{risk_level}_d{time_window}'
                ratio_key = f'word_shift_ratio_rlevel{risk_level}_d{time_window}'
                if df.empty or time_window == 'all':
                    feature_dict[diff_key] = -999
                    feature_dict[ratio_key] = -999
                    continue
                delta = pd.Timedelta(days=time_window)
                time_data = df[(df[time_col] >= apply_time - 2 * delta) & (df[time_col] <= apply_time)]
                # Per-message count of distinct configured-word hits.
                hits = time_data['words'].apply(lambda x: len(set(x) & words))
                now_cnt = hits[time_data[time_col] > apply_time - delta].sum()
                last_cnt = hits[time_data[time_col].between(apply_time - 2 * delta,
                                                            apply_time - delta)].sum()
                feature_dict[diff_key] = now_cnt - last_cnt
                if last_cnt == 0:
                    feature_dict[ratio_key] = -99
                else:
                    feature_dict[ratio_key] = now_cnt / last_cnt
        return feature_dict

    @staticmethod
    def extract_word_rlevel_time_period_features(df, time_col, config_all, apply_time):
        """
        Configured-word hit count and its ratio to the period's total token
        count, per risk level, window, and intra-day time period.

        Fix vs. the previous version: the comprehension variable no longer
        shadows the risk-level word set, and the total token count is
        computed once per period instead of twice.

        Args:
            df: input dataframe
            time_col: time column
            config_all: config data
            apply_time: application time anchoring the windows

        Returns:
            feature dict
        """
        feature_dict = {}
        apply_time = pd.to_datetime(apply_time)
        for risk_level in config_all['risk_level'].unique():
            words = set(config_all[config_all['risk_level'] == risk_level]['word'])
            for time_window in SmsConfigConstant.INTERVAL_LIST:
                # Slice once per window; skip slicing entirely for empty input.
                time_data = None if df.empty else \
                    SmsOverdueRateV1._window_data(df, time_col, apply_time, time_window)
                for hour_lo, hour_hi, period_name in SmsConfigConstant.TIME_PERIODS:
                    cnt_key = f'word_cnt_rlevel{risk_level}_{period_name}_d{time_window}'
                    ratio_key = f'word_ratio_rlevel{risk_level}_{period_name}_d{time_window}'
                    if df.empty:
                        feature_dict[cnt_key] = -999
                        feature_dict[ratio_key] = -999
                        continue
                    time_data_period = time_data[time_data['hour'].between(hour_lo, hour_hi)]
                    cnt = sum(len(set(tokens) & words) for tokens in time_data_period['words'])
                    feature_dict[cnt_key] = cnt
                    total_tokens = sum(len(tokens) for tokens in time_data_period['words'])
                    if total_tokens == 0:
                        feature_dict[ratio_key] = -99
                    else:
                        feature_dict[ratio_key] = cnt / total_tokens
        return feature_dict


