#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@File    ：data_utils.py
@IDE     ：PyCharm 
@Author  ：lmy
@Date    ：2024/7/25 16:19 
'''

import re
import math
import numpy as np
import pandas as pd
from itertools import chain
from datetime import datetime
from datetime import timedelta
from nltk import word_tokenize
from pythainlp import word_tokenize as word_tokenize_th
from feature_set.sms.utils.data_content import CONTENT
from nltk.tokenize import RegexpTokenizer

conf_lst = CONTENT()
# Per-language stop-word lists: Spanish, Thai, Indonesian, Swahili.
STOPWORD_ES = conf_lst.STOPWORD_ES
STOPWORD_TH = conf_lst.STOPWORD_TH
STOPWORD_ID = conf_lst.STOPWORD_ID
STOPWORD_SW = conf_lst.STOPWORD_SW

# Time-window configuration used for feature naming/slicing
WINDOW_LST = conf_lst.WINDOW_LST
TIME_INTERVALS = conf_lst.TIME_INTERVALS
WEEK_TYPES = conf_lst.WEEK_TYPES
TIME_AGG_TYPES = conf_lst.TIME_AGG_TYPES
TIME_PERIODS = conf_lst.TIME_PERIODS
# Basic statistical units (feature column names, e.g. body/body_pre/sender/word)
FEA_LST = conf_lst.FEA_LST
# Types for "se" (start/end date) features — see calc_se_fea
SE_TYPE = conf_lst.SE_TYPE
# Read / unread flag values
READ_TYPE = conf_lst.READ_TYPE
# Sent / received message type values
SMS_TYPE = conf_lst.SMS_TYPE
# Loan/overdue info field names — see calc_overdue_fea
OVERDUE_LST = conf_lst.OVERDUE_LST


def tikenization(body, language):
    """
    Tokenize an SMS body for the given language.

    :param body: SMS text content.
    :param language: one of 'spanish', 'indonesian', 'thai', 'swahili'.
    :return: list of tokens.
    :raises ValueError: if *language* is not supported.
    """
    if language == 'spanish':
        tokens = word_tokenize(body, language=language)
    elif language == 'indonesian':
        # Simple regex tokenizer (runs of word characters); built only on
        # this branch instead of unconditionally for every call.
        tokens = RegexpTokenizer(r'\w+').tokenize(body)
    elif language == 'thai':
        tokens = word_tokenize_th(body, engine="newmm")
    elif language == 'swahili':
        # nltk has no Swahili model; fall back to its default tokenizer.
        tokens = word_tokenize(body)
    else:
        # Fixed: the old message claimed only ['spanish','thai'] were valid.
        raise ValueError(
            "language error, the value not in "
            "['spanish', 'indonesian', 'thai', 'swahili']")
    return tokens


def is_carrier(sender, carrier_lst):
    """Return 1 if *sender* starts or ends with any carrier id in *carrier_lst*, else 0."""
    for carrier in carrier_lst:
        if len(sender) >= len(carrier):
            if sender.startswith(carrier) or sender.endswith(carrier):
                return 1
        elif sender == carrier:
            return 1
    return 0


def is_gamble(sender, body, gamble_sender, gamble_words):
    """
    Flag a gambling SMS: sender is in a known gambling-sender list, or the
    body matches any gambling keyword pattern.

    :param sender: SMS sender id.
    :param body: SMS text content.
    :param gamble_sender: collection of known gambling senders.
    :param gamble_words: iterable of regex patterns for gambling keywords.
    :return: 1 if flagged, else 0 (also 0 on malformed input).
    """
    try:
        if sender in gamble_sender:
            return 1
        for pattern in gamble_words:
            if re.search(pattern, body):
                return 1
        return 0
    # Narrowed from a bare except: None/non-string inputs raise TypeError,
    # malformed patterns raise re.error; both are treated as "not gambling".
    except (TypeError, re.error):
        return 0


def is_bit(sender, words, bit_lst):
    """
    Flag a crypto/bit-related SMS: sender is in *bit_lst*, or the tokenized
    body shares at least one token with *bit_lst*.

    :param sender: SMS sender id.
    :param words: iterable of body tokens.
    :param bit_lst: collection of crypto-related senders/keywords.
    :return: 1 if flagged, else 0 (also 0 on malformed input).
    """
    try:
        if sender in bit_lst:
            return 1
        if set(words) & set(bit_lst):
            return 1
        return 0
    # Narrowed from a bare except: non-iterable/None/unhashable inputs
    # raise TypeError; treat them as "not bit-related".
    except TypeError:
        return 0


def is_uc(key_words, body):
    """
    Return 1 if *body* matches any regex pattern in *key_words*, else 0.

    :param key_words: list of target keyword patterns.
    :param body: SMS text content.
    """
    matched = any(re.search(pattern, body) for pattern in key_words)
    return 1 if matched else 0


def calc_overdue_info(body):
    """
    Extract overdue days and amount from a Thai SMS that may contain only
    one of the two values.

    :param body: SMS text content.
    :return: [overdue_days, overdue_amount]; np.nan where not found.
    """
    days, amount = np.nan, np.nan

    if 'เกินกำหนด' in body:
        amount_match = re.search(r'(\d[\d,]*\.?\d*)\s*(฿|บ|thb)', body)
        days_match = re.search(r'(\d+)\s*วัน', body)

        if amount_match:
            # Strip thousand separators before converting.
            amount = float(amount_match.group(1).replace(',', ''))
        if days_match:
            days = float(days_match.group(1))

    return [days, amount]


def calc_overdue_info2(body):
    """
    Extract overdue days and amount from a Thai SMS that contains both.

    :param body: SMS text content.
    :return: [overdue_days, overdue_amount]; [nan, nan] when no match.
    """
    pattern = r'(ค้างชำระ|เกินกำหนด).*?(\d+)\s*วัน.*?(ยอดเงิน|ชำระคืน|ยอดวันนี้|ชำระ).*?(\d[\d,]*\.?\d*)\s*(฿|บ|thb)'
    match = re.search(pattern, body)
    if not match:
        return [np.nan, np.nan]
    # Group 2 holds the days, group 4 the amount (strip thousand separators).
    return [float(match.group(2)),
            float(match.group(4).replace(',', ''))]


def calc_overdue_days(body, country_code):
    """
    Extract the number of overdue days from an SMS body.

    :param body: SMS text content.
    :param country_code: 'TH' or 'CO'; any other value yields np.nan.
    :return: overdue days as float, or np.nan when not found.
    """
    result = np.nan

    if country_code == 'TH':
        simple = np.nan
        combined = np.nan
        if 'เกินกำหนด' in body:
            # Overdue keyword present: a bare "<n> วัน" is enough.
            m = re.search(r'(\d+)\s*วัน', body)
            if m:
                simple = float(m.group(1))
        else:
            # Otherwise require the full "days + amount" structure.
            m = re.search(
                r'(ค้างชำระ|เกินกำหนด).*?(\d+)\s*วัน.*?(ยอดเงิน|ชำระคืน|ยอดวันนี้|ชำระ).*?(\d[\d,]*\.?\d*)\s*(฿|บ|thb)',
                body)
            if m:
                combined = float(m.group(2))

        # Prefer the combined-pattern hit over the simple one.
        if not pd.isna(combined):
            result = combined
        elif not pd.isna(simple):
            result = simple

    if country_code == 'CO':
        # Day count either before or after the overdue keyword.
        m = re.search(r'(\d+)\s*(dias|dia).*?(atrasados|atraso|retraso|mora|vencido|atrasado|retrasado)', body)
        m2 = re.search(r'(atrasados|atraso|retraso|mora|vencido|atrasado|retrasado).*?(\d+)\s*(dias|dia)', body)
        if m:
            result = float(m.group(1))
        if m2:
            # Keyword-first form wins when both match.
            result = float(m2.group(2))

    return result


def calc_overdue_amt(body, country_code):
    """
    Extract the overdue amount from an SMS body.

    :param body: SMS text content.
    :param country_code: 'TH' or 'CO'; any other value yields np.nan.
    :return: overdue amount as float, or np.nan when not found.
    """
    result = np.nan

    if country_code == 'TH':
        simple = np.nan
        combined = np.nan
        if 'เกินกำหนด' in body:
            # Overdue keyword present: a bare "<amount> ฿" is enough.
            m = re.search(r'(\d[\d,]*\.?\d*)\s*(฿|บ|thb)', body)
            if m:
                simple = float(m.group(1).replace(',', ''))
        else:
            # Otherwise require the full "days + amount" structure.
            m = re.search(
                r'(ค้างชำระ|เกินกำหนด).*?(\d+)\s*วัน.*?(ยอดเงิน|ชำระคืน|ยอดวันนี้|ชำระ).*?(\d[\d,]*\.?\d*)\s*(฿|บ|thb)',
                body)
            if m:
                combined = float(m.group(4).replace(',', ''))

        # Prefer the combined-pattern hit over the simple one.
        if not pd.isna(combined):
            result = combined
        elif not pd.isna(simple):
            result = simple

    if country_code == 'CO':
        m = re.search(r'\$\s*(\d[\d,]*\.?\d*).*?(atrasados|atraso|retraso|mora|vencido|atrasado|retrasado)', body)
        m2 = re.search(r'(atrasados|atraso|retraso|mora|vencido|atrasado|retrasado).*?\$\s*(\d[\d,|\d.]*\.?\d*)', body)
        if m:
            # Colombian amounts may use '.' or ',' as separators; strip both.
            result = float(m.group(1).replace('.', '').replace(',', ''))
        if m2:
            result = float(m2.group(2).replace('.', '').replace(',', ''))

    return result


def calc_repay_amount(body, country_code):
    """
    Extract the repaid amount from an SMS body.

    :param body: SMS text content.
    :param country_code: 'TH' parses the body; 'CO' returns a fixed sentinel.
    :return: repay amount as float, -999 for 'CO', np.nan when not found.
    """
    amount = np.nan

    if country_code == 'TH':
        match = re.search(r'ได้ชำระคืน.*?(\d[\d,]*\.?\d*)\s*(฿|บ|thb)', body)
        if match:
            # Strip thousand separators before converting.
            amount = float(match.group(1).replace(',', ''))

    if country_code == 'CO':
        # No repay-amount pattern defined for Colombia; sentinel value.
        amount = -999

    return amount


def calc_pay_amount(body, country_code):
    """
    Extract a successful payment amount from a Colombian SMS.

    :param body: SMS text content.
    :param country_code: only 'CO' is parsed; anything else yields np.nan.
    :return: payment amount as float, or np.nan when not found.
    """
    amount = np.nan

    if country_code == 'CO':
        match = re.search(
            r'(realizado un pago|el pago de la compra con tu tarjeta debito rappipay davivienda por) .*\$\s*(\d[\d,]*\.?\d*).*?exitoso',
            body)
        if match:
            amount = float(match.group(2).replace(',', ''))

    return amount


def calc_balance_amount(body, country_code):
    """
    Extract the account balance from a Colombian SMS.

    :param body: SMS text content.
    :param country_code: only 'CO' is parsed; anything else yields np.nan.
    :return: balance as float, or np.nan when not found.
    """
    balance = np.nan

    if country_code == 'CO':
        match = re.search(r'ahora tienes\s*\$\s*(\d[\d,]*\.?\d*).*?', body)
        if match:
            balance = float(match.group(1).replace(',', ''))

    return balance


def calc_overdue_fea(df, t):
    """
    Aggregate overdue/repay metrics (max/min/avg/sum plus counts) over a
    pre-filtered dataframe.

    :param df: filtered dataframe carrying the OVERDUE_LST columns plus
        'overdue_days', 'overdue_amt', 'repay_amt'.
    :param t: time-window label used in feature names.
    :return: dict feature name -> value; -999 for empty input or NaN stats.
    """
    res = {}
    if df.shape[0] == 0:
        for key in OVERDUE_LST:
            for stat in ('max', 'min', 'avg', 'sum'):
                res[f"{key}_{stat}_{t}"] = -999
        res[f"overdue_cnt_{t}"] = -999
        res[f"repay_amt_{t}"] = -999
        return res

    for key in OVERDUE_LST:
        col = df[key]
        for stat, value in (('max', float(col.max())), ('min', float(col.min())),
                            ('avg', float(col.mean())), ('sum', float(col.sum()))):
            res[f"{key}_{stat}_{t}"] = -999 if pd.isna(value) else value

    # Rows carrying any overdue signal vs rows with a repay amount.
    overdue_mask = df['overdue_days'].notnull() | df['overdue_amt'].notnull()
    res[f"overdue_cnt_{t}"] = df[overdue_mask].shape[0]
    res[f"repay_amt_{t}"] = df[df['repay_amt'].notnull()].shape[0]
    return res


def calc_finance_fea(df, t, key):
    """
    Aggregate max/min/avg/sum and non-null count for one finance column.

    :param df: filtered dataframe.
    :param t: time-window label used in feature names.
    :param key: column to aggregate.
    :return: dict feature name -> value; -999 for empty input or NaN stats.
    """
    res = {}
    if df.shape[0] == 0:
        for stat in ('max', 'min', 'avg', 'sum'):
            res[f"{key}_{stat}_{t}"] = -999
        res[f"{key}_{t}"] = -999
        return res

    col = df[key]
    for stat, value in (('max', col.max()), ('min', col.min()),
                        ('avg', col.mean()), ('sum', col.sum())):
        res[f"{key}_{stat}_{t}"] = -999 if pd.isna(value) else value
    # Count of rows where the column actually has a value.
    res[f"{key}_{t}"] = df[col.notnull()].shape[0]
    return res


def extract_contents_in_brackets(body):
    """
    Extract the phrase inside a bracket pair at the start of an SMS.

    Supports (), [], {}, full-width （）【】《》, and <>.

    :param body: SMS text content.
    :return: list of extracted phrases (empty when the SMS has no leading bracket).
    """
    pattern = r'^\(([^()]*)\)|^\[([^\[\]]*)\]|^\{([^{}]*)\}|^\（([^（）]*)\）|^\【([^【】]*)\】|^\《([^《》]*)\》|^\<([^<>]*)\>|^\{([^{}]*)\}'
    # findall yields one tuple per match; only the matched alternative's
    # capture group is non-empty, so keep just those.
    return [group
            for match in re.findall(pattern, body)
            for group in match
            if group != '']


def calc_cnt_fea(df):
    """
    Distinct-value counts for each feature in FEA_LST (body, body_pre,
    sender, word).

    The 'word' column holds token lists, so it is flattened before
    counting; the other columns use nunique.

    :param df: filtered dataframe.
    :return: dict feature -> distinct count (all 0 when df is empty).
    """
    if df.shape[0] == 0:
        return {fea: 0 for fea in FEA_LST}

    res = {}
    for fea in FEA_LST:
        if fea == 'word':
            res[fea] = len(set(chain.from_iterable(df[fea])))
        else:
            res[fea] = df[fea].nunique()
    return res


def gen_cnt_fea(all_df, comp_df, t, sms_feature_res):
    """
    Populate count and ratio features (competitor counts vs. all counts).

    :param all_df: dataframe of all messages.
    :param comp_df: dataframe of competitor messages.
    :param t: time-window label used in feature names.
    :param sms_feature_res: dict the features are written into.
    :return: the updated sms_feature_res dict.
    """
    all_counts = calc_cnt_fea(all_df)
    comp_counts = calc_cnt_fea(comp_df)

    for fea in FEA_LST:
        total = all_counts[fea]
        comp = comp_counts[fea]
        if total == 0:
            # No data in the window at all: zero count, sentinel ratio.
            sms_feature_res[f'{fea}_{t}'] = 0
            sms_feature_res[f'{fea}_rto_{t}'] = -999
        else:
            sms_feature_res[f'{fea}_{t}'] = comp
            sms_feature_res[f'{fea}_rto_{t}'] = comp / total

    return sms_feature_res


def gen_top_fea(df, key_fea, fea_lst):
    """
    For each feature, find the *key_fea* group with the most distinct values.

    :param df: filtered dataframe.
    :param key_fea: grouping column to aggregate by.
    :param fea_lst: columns to count distinct values of, e.g. body, body_pre, word.
    :return: dict with the top-1 group, its count, and its share per feature.
    """
    res = {}
    if df.shape[0] == 0:
        for fea in fea_lst:
            res[f'top1_type_{fea}'] = ''
            res[f'cnt_{fea}_top1_type'] = -999
            res[f'rto_{fea}_top1_type'] = -999
        return res

    # e.g. {'body': {1: 4, 0: 5}, ..., 'word': {1: 3, 0: 4}}
    grouped = df.groupby(key_fea)[fea_lst].nunique().to_dict()
    for fea, counts in grouped.items():
        # Sort by (count, group key) so the last entry is the winner;
        # ties break toward the larger group key.
        ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]))
        top_group, top_cnt = ranked[-1]
        total = sum(cnt for _, cnt in ranked)
        res[f'top1_type_{fea}'] = str(top_group)
        res[f'cnt_{fea}_top1_type'] = top_cnt
        res[f'rto_{fea}_top1_type'] = top_cnt / total if total > 0 else -999

    return res


def calc_density(tmp_df, t):
    """
    Contact-density score: sqrt(sum of squared day-gaps) / window length.

    Gaps are measured between consecutive distinct SMS days, padded with
    the window start (apply_day - t) and the apply day itself.

    :param tmp_df: dataframe with 'time_day' and 'apply_day' ('%Y-%m-%d').
    :param t: window length in days.
    :return: density rounded to 6 decimals, or -999 when empty or t <= 0.
    """
    if tmp_df.shape[0] == 0:
        return -999

    apply_day = tmp_df['apply_day'].values[0]
    last_date = datetime.strptime(apply_day, '%Y-%m-%d')
    first_date = last_date - timedelta(days=t)
    day_dates = [datetime.strptime(d, '%Y-%m-%d')
                 for d in sorted(tmp_df['time_day'].unique())]
    timeline = [first_date] + day_dates + [last_date]

    squared_gap_sum = sum((curr - prev).days ** 2
                          for prev, curr in zip(timeline, timeline[1:]))
    return round(math.sqrt(squared_gap_sum) / t, 6) if t > 0 else -999


def calc_distribution_utils(tmp_dict, apply_day):
    """
    Distribution metrics over per-day distinct counts.

    :param tmp_dict: {feature: {day('%Y-%m-%d'): count}} as produced by
        groupby('time_day')[...].nunique().to_dict().
    :param apply_day: application day string ('%Y-%m-%d').
    :return: dict of newest/oldest/max/min/diff/maa/avg/std metrics per feature.
    """
    metrics_dic = {}
    apply_date = datetime.strptime(apply_day, '%Y-%m-%d')

    for fea, per_day in tmp_dict.items():
        # e.g. [('2023-01-01', 1), ('2023-02-02', 1)]
        by_date = sorted(per_day.items(), key=lambda kv: (kv[0], kv[1]))
        by_count = sorted(per_day.items(), key=lambda kv: (kv[1], kv[0]))

        newest = by_date[-1][-1]  # count on the day closest to apply_day
        oldest = by_date[0][-1]   # count on the day furthest from apply_day
        metrics_dic[f'{fea}_newest'] = newest
        metrics_dic[f'{fea}_oldest'] = oldest
        metrics_dic[f'{fea}_newest_oldest_diff'] = newest - oldest

        highest = by_count[-1][-1]
        lowest = by_count[0][-1]
        metrics_dic[f'{fea}_max'] = highest
        metrics_dic[f'{fea}_min'] = lowest
        metrics_dic[f'{fea}_max_min_diff'] = highest - lowest

        # Days between apply_day and the day with the max count.
        busiest_date = datetime.strptime(by_count[-1][0], '%Y-%m-%d')
        metrics_dic[f'{fea}_maa'] = (apply_date - busiest_date).days

        # Mean / std of the per-day counts.
        counts = [cnt for _, cnt in by_count]
        metrics_dic[f'{fea}_avg'] = np.mean(counts)
        metrics_dic[f'{fea}_std'] = np.std(counts)

    return metrics_dic


def calc_agg_fea(tmp_df):
    """
    Build per-day aggregation features for body/body_pre/sender/word.

    The 'word' column holds token lists, so it is exploded to one token
    per row before counting distinct values per day.

    :param tmp_df: filtered dataframe.
    :return: dict of distribution metrics (-999 defaults when tmp_df is empty).
    """
    if tmp_df.shape[0] == 0:
        suffixes = ('newest', 'oldest', 'newest_oldest_diff', 'max', 'min',
                    'max_min_diff', 'maa', 'avg', 'std')
        return {f'{fea}_{s}': -999 for fea in FEA_LST for s in suffixes}

    apply_day = tmp_df['apply_day'].values[0]
    metrics_dic = {}

    # Per-day distinct counts of the plain string columns.
    per_day = tmp_df.groupby('time_day')[['body', 'body_pre', 'sender']].nunique().to_dict()
    metrics_dic.update(calc_distribution_utils(per_day, apply_day))

    # Explode the token lists to count distinct words per day.
    per_day_words = tmp_df.explode('word').groupby('time_day')[['word']].nunique().to_dict()
    metrics_dic.update(calc_distribution_utils(per_day_words, apply_day))

    return metrics_dic


def calc_continue(tmp_df, t):
    """
    Streak / gap features over the distinct SMS days in a window.

    Computes the number of active days and their share of the window,
    gap statistics between consecutive active days, consecutive-run
    statistics, the distance from the longest run's end to the apply day,
    and counts of gaps longer than 5/10/20 days.

    :param tmp_df: dataframe with 'time_day' and 'apply_day' ('%Y-%m-%d').
    :param t: window label like '7d', or 'all' (then the observed span is used).
    :return: dict of streak/gap features (-999 defaults when tmp_df is empty).
    """
    res_dic = {}
    if tmp_df.shape[0] != 0:

        tmp_day = tmp_df['time_day'].unique()
        apply_day = tmp_df['apply_day'].values[0]
        sms_time_lst = [datetime.strptime(i, '%Y-%m-%d') for i in sorted(tmp_day)]
        last_date = datetime.strptime(apply_day, '%Y-%m-%d')
        first_date = datetime.strptime(min(tmp_day), '%Y-%m-%d')

        # Number of distinct active days
        agg_cnt = tmp_df['time_day'].nunique()

        # Window length in days: parse '7d'-style labels; 'all' means the
        # span from the first SMS day to the apply day.
        if t != 'all':
            t = int(t.replace('d', ''))
        else:
            t = (last_date - first_date).days

        # if first_date != sms_time_lst[0] and last_date == sms_time_lst[-1]:
        #     sms_time_lst = [first_date] + sms_time_lst
        # if first_date == sms_time_lst[0] and last_date != sms_time_lst[-1]:
        #     sms_time_lst = sms_time_lst + [last_date]
        # if first_date != sms_time_lst[0] and last_date != sms_time_lst[-1]:
        #     sms_time_lst = [first_date] + sms_time_lst + [last_date]

        # Share of window days that are active
        agg_rto = round(agg_cnt / t, 6) if t > 0 else -999

        # Gap (idle-day) features
        diff_lst = []
        max_consecutive = 1  # length of the longest run of consecutive days
        current_consecutive = 1
        longest_start = first_date
        longest_end = first_date
        continue_days = []  # lengths of every consecutive run
        for i in range(1, len(sms_time_lst)):
            # Idle days between two consecutive active days (0 = adjacent days).
            diff_day = (sms_time_lst[i] - sms_time_lst[i - 1]).days - 1
            diff_lst.append(diff_day)

            if diff_day == 0:
                current_consecutive += 1
            else:
                # Gap found: close the current run and record it.
                continue_days.append(current_consecutive)
                if current_consecutive > max_consecutive:
                    max_consecutive = current_consecutive
                    longest_start = first_date
                    longest_end = sms_time_lst[i - 1]
                current_consecutive = 1
                # first_date now tracks the start of the new run.
                first_date = sms_time_lst[i]
        continue_days.append(current_consecutive)
        # Close out the final consecutive run
        if current_consecutive > max_consecutive:
            max_consecutive = current_consecutive
            longest_start = first_date
            longest_end = sms_time_lst[-1]

        length = len(diff_lst)
        max_interval = max(diff_lst) if length > 0 else 0
        min_interval = min(diff_lst) if length > 0 else 0
        avg_interval = sum(diff_lst) / length if length > 0 else 0
        std_interval = np.std(diff_lst) if length > 0 else 0

        # Consecutive-run features (continue_days is never empty here)

        length = len(continue_days)
        max_continue = max(continue_days)
        min_continue = min(continue_days)
        avg_continue = sum(continue_days) / length
        std_continue = np.std(continue_days)

        # Days from the end of the longest run to the apply day
        max_continue_ea = (last_date - longest_end).days

        # Number of gaps longer than x days
        # NOTE: the 't' in the generator expression shadows the window
        # length only inside the genexp scope; the outer 't' is untouched.
        interval_days_over = {}
        for j in [5, 10, 20]:
            diff_day_over = sum(t > j for t in diff_lst)
            interval_days_over[f'interval_days_over_{j}days'] = diff_day_over
    else:
        agg_cnt = 0
        agg_rto = 0
        max_interval = -999
        min_interval = -999
        avg_interval = -999
        std_interval = -999
        max_continue = -999
        min_continue = -999
        avg_continue = -999
        std_continue = -999

        # Days from the end of the longest run to the apply day
        max_continue_ea = -999

        # Start/end dates of the longest run
        longest_start = -999
        longest_end = -999

        # Number of gaps longer than n days
        interval_days_over = {}
        for j in [5, 10, 20]:
            interval_days_over[f'interval_days_over_{j}days'] = -999
    res_dic['agg_cnt'] = agg_cnt
    res_dic['agg_rto'] = agg_rto
    res_dic['max_interval'] = max_interval
    res_dic['min_interval'] = min_interval
    res_dic['avg_interval'] = avg_interval
    res_dic['std_interval'] = std_interval
    res_dic['max_continue'] = max_continue
    res_dic['min_continue'] = min_continue
    res_dic['avg_continue'] = avg_continue
    res_dic['std_continue'] = std_continue
    res_dic['max_continue_ea'] = max_continue_ea
    res_dic['longest_start'] = longest_start
    res_dic['longest_end'] = longest_end
    for i in interval_days_over:
        res_dic[i] = interval_days_over[i]

    return res_dic


def calc_se_fea(df):
    """
    Start/end ("se") date-distance features.

    es: days between the first and last SMS day;
    as: days between the first SMS day and the apply day;
    ae: days between the last SMS day and the apply day.

    :param df: dataframe with 'time_day' and 'apply_day' ('%Y-%m-%d').
    :return: dict with keys 'es', 'as', 'ae' (-999 each when df is empty).
    """
    if df.shape[0] == 0:
        return {'es': -999, 'as': -999, 'ae': -999}

    apply_date = datetime.strptime(df['apply_day'].values[0], '%Y-%m-%d')
    start_date = datetime.strptime(df['time_day'].min(), '%Y-%m-%d')
    end_date = datetime.strptime(df['time_day'].max(), '%Y-%m-%d')

    return {
        'es': (end_date - start_date).days,
        'as': (apply_date - start_date).days,
        'ae': (apply_date - end_date).days,
    }


def calc_dod_cnt_rto(tmp_df, i, fea, sms_feature_res):
    """
    Period-over-period diff/ratio features for one feature column.

    Column *i* flags the period (presumably 0 = current sub-window,
    1 = prior sub-window — confirm with the caller); row counts and
    distinct counts of *fea* are compared between the two groups.

    :param tmp_df: dataframe carrying columns *i* and *fea*.
    :param i: period flag column / window label, e.g. 'd7od7'.
    :param fea: column to count, e.g. 'body' or 'word'.
    :param sms_feature_res: dict the features are written into.
    :return: the updated sms_feature_res dict.
    """
    if tmp_df.shape[0] == 0:
        for name in (f'unique_{fea}_diff_{i}', f'{fea}_diff_{i}',
                     f'unique_{fea}_rto_{i}', f'{fea}_rto_{i}'):
            sms_feature_res[name] = -999
        return sms_feature_res

    group0 = tmp_df[tmp_df[i] == 0]
    group1 = tmp_df[tmp_df[i] == 1]

    uniq0 = group0[fea].nunique()
    uniq1 = group1[fea].nunique()
    cnt0 = group0.shape[0]
    cnt1 = group1.shape[0]

    sms_feature_res[f'unique_{fea}_diff_{i}'] = uniq0 - uniq1
    sms_feature_res[f'{fea}_diff_{i}'] = cnt0 - cnt1
    sms_feature_res[f'unique_{fea}_rto_{i}'] = (
        round(uniq0 / uniq1, 6) if uniq1 > 0 else -999)
    sms_feature_res[f'{fea}_rto_{i}'] = (
        round(cnt0 / cnt1, 6) if cnt1 > 0 else -999)

    return sms_feature_res

def ngrams(tokens, n):
    """Return the n-grams of *tokens* as space-joined strings (empty if n > len)."""
    words = [str(token) for token in tokens]
    window_count = len(words) - n + 1
    return [' '.join(words[start:start + n]) for start in range(window_count)]