import json
import glob
import os

import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from pythainlp.tokenize import word_tokenize as thai_word_tokenize
from feature_set.sms.utils.data_utils import ngrams
from feature_set.base_feature import BaseFeature, RequstData
from collections import Counter
from feature_conf.config.constant import *
from feature_set.sms.utils.sender_word_utils.sms_utils import time_trans
import sys
sys.path.append("../../")



class SmsUnWoeV1(BaseFeature):
    """Compute WOE (weight-of-evidence) token features from a user's SMS history.

    Each SMS body is tokenized (Thai or NLTK, per ``language``) into 1..n_gram
    n-grams; tokens are matched against a pre-computed token-level config read
    from parquet, and counts are aggregated over a set of look-back windows.
    """

    def __init__(self, conf_path, language, n_gram=3):
        """
        :param conf_path: config directory, relative to <featurelib>/feature_conf/
        :param language: tokenizer language; 'thai' selects pythainlp, anything
            else is passed to nltk.word_tokenize(language=...)
        :param n_gram: largest n-gram size to extract (inclusive, from 1)
        """
        super().__init__()

        # config file names resolved under <featurelib>/feature_conf/<conf_path>
        self.order_split_file_name = 'order_split_no.conf'
        self.token_levels_file_name = 'token_levels.conf'
        self.conf_path = conf_path
        self.load_conf()

        self.language = language
        self.n_gram = n_gram
        self.mode_name = 'sms_woe'
        self.user_sms = None  # populated by load_request()

        # NOTE: despite the name, this holds the *count* of distinct woe levels
        self.woe_level_list = None
        self.sms_grams_code = {}  # gram size -> exploded (token, count) DataFrame

        self.function_map = {
            # 'base_statis': self.base_statis_features,
            'time_window': self.time_statis_features
        }

        self.sms_token_level_conf = {}  # gram size -> token-level config slice

        # look-back windows (in days) over which token stats are aggregated
        self.time_windows = ['1d', '3d', '7d', '15d', '30d', '60d', '120d', '180d', 'all']

    def load_conf(self):
        """Load the order-split and token-level parquet configs.

        Locates the 'featurelib' root from this file's absolute path (raises
        ValueError if deployed outside a 'featurelib' directory) and pre-slices
        the token-level table per (random_no, gram) for fast lookup later.
        """
        self.conf = {}
        path_list = os.path.abspath(__file__).split(os.sep)
        featurelib_index = path_list.index('featurelib')
        featurelib_path = os.sep.join(path_list[0:featurelib_index + 1])
        conf_path = os.sep.join([featurelib_path, 'feature_conf', self.conf_path])

        order_split_file = os.path.join(conf_path, self.order_split_file_name)
        self.conf['order_split'] = pd.read_parquet(order_split_file)

        token_levels_file = os.path.join(conf_path, self.token_levels_file_name)
        token_levels = pd.read_parquet(token_levels_file)

        # normalize woe_level to a plain string key; tag tokens by lift direction.
        # NOTE: 'higth_lift' spelling is kept as-is — it feeds downstream feature names.
        token_levels['woe_level'] = token_levels['woe_level'].map(lambda x: str(int(x)))
        token_levels['level_tag'] = np.where(token_levels['lift'] > 1, 'higth_lift', 'low_lift')

        self.conf['token_levels'] = {}

        random_nos = token_levels['random_no'].unique()
        # count of distinct woe levels (see __init__ note about the name)
        self.woe_level_list = token_levels['woe_level'].nunique()

        grams = token_levels['gram'].unique()
        for random_no in random_nos:
            self.conf['token_levels'][random_no] = {}
            for gram in grams:
                self.conf['token_levels'][random_no][gram] = token_levels[
                    (token_levels['random_no'] == random_no) & (token_levels['gram'] == gram)]

    def load_request(self, request_data: RequstData):
        """Parse the request's SMS payload into per-gram token DataFrames.

        Populates ``self.user_sms``, ``self.sms_token_level_conf`` and
        ``self.sms_grams_code`` for later aggregation by time_grouped_feature().
        On an unparsable / missing sms_data payload, logs a warning and
        proceeds with an empty message set (best-effort, by design).
        """
        tx_id = request_data.tx_id
        app_user_id = request_data.app_user_id
        order_id = request_data.order_id
        country_id = request_data.country_abbr
        apply_time = request_data.apply_time

        # pick the config split for this order; fall back to the 'all' split
        order_split = self.conf['order_split']
        order_split = order_split[order_split['app_order_id'] == str(order_id)]['random_no'].to_list()
        random_no = str(order_split[0]) if len(order_split) > 0 else 'all'

        assert country_id in GenericConfigConstant.COUNTRY_ID, "country id not in list, Please input correct country id"
        sms_list = []
        try:
            sms_data = request_data.data_sources["sms_data"]
            sms_list = json.loads(sms_data) if isinstance(sms_data, str) else sms_data
        except (KeyError, TypeError, ValueError):
            # narrowed from a bare except: missing "sms_data" key, a payload
            # json.loads cannot take, or invalid JSON (JSONDecodeError is a ValueError)
            self.logger.warning(
                f"tx_id= {tx_id} ,app_user_id= {app_user_id} ,app_order_id= {order_id} , sms_data 解析失败，请检查数据格式")

        if sms_list is None or len(sms_list) == 0:
            user_sms = pd.DataFrame(
                    columns=['body', 'phone', 'read', 'src_phone', 'time', 'type', 'time_day', 'hour', 'weekday', 'month',
                             'words', 'sender'])
        else:
            user_sms = pd.DataFrame(sms_list)

        if len(user_sms) > 0:
            # keep rows whose raw timestamp looks valid (epoch seconds/millis
            # or a 34-char string — presumably an ISO-like format; TODO confirm).
            # .copy() avoids SettingWithCopyWarning on the assignment below.
            user_sms = user_sms[user_sms['time'].apply(lambda x: len(str(x)) in [10, 13, 34])].copy()
            user_sms.loc[:, 'time'] = user_sms['time'].apply(lambda x: time_trans(x, country_id))

        user_sms['apply_time'] = pd.Timestamp(apply_time)
        # only messages received strictly before the application time are usable
        user_sms = user_sms[user_sms['time'] < user_sms['apply_time']]

        # keep the most recent 3000 messages per user
        user_sms = user_sms.sort_values(by='time', ascending=False).head(3000)
        user_sms['time'] = pd.to_datetime(user_sms['time'], errors='coerce')
        user_sms.dropna(subset=['time'], inplace=True)

        user_sms['apply_day'] = user_sms['apply_time'].dt.normalize()
        user_sms['time_day'] = user_sms['time'].dt.normalize()
        user_sms['date_diff'] = (user_sms['apply_day'] - user_sms['time_day']).dt.days

        user_sms['sms_count'] = user_sms['body'].count()

        # number each body so hit_order can count distinct messages later
        user_sms['body_no'] = np.arange(1, len(user_sms) + 1)
        if self.language == 'thai':
            user_sms['body_words'] = user_sms['body'].map(lambda x: thai_word_tokenize(x))
        else:
            user_sms['body_words'] = user_sms['body'].map(lambda x: word_tokenize(x, language=self.language))

        # binary indicator per look-back window (1 = message inside the window)
        for time_wind in self.time_windows:
            if time_wind == 'all':
                user_sms[time_wind] = 1
            else:
                days = int(time_wind.replace('d', ''))
                user_sms[time_wind] = np.where(user_sms['date_diff'] <= days, 1, 0)

        # explode each message into (token, count) pairs for every gram size
        for i in range(1, self.n_gram + 1):
            level_conf = self.conf['token_levels'][random_no][i]
            self.sms_token_level_conf[i] = level_conf

            token_list = f'gram{i}'
            user_sms[token_list] = user_sms['body_words'].map(
                lambda x, n=i: list(Counter(ngrams(x, n)).items()))
            token_df = user_sms[[token_list, 'time_day', 'apply_time', 'date_diff', 'sms_count', 'body_no']
                                + self.time_windows].explode(token_list)
            token_df = token_df[token_df[token_list].notnull()]
            token_df['token'] = token_df[token_list].map(lambda x: x[0])
            token_df['token_count'] = token_df[token_list].map(lambda x: x[1])
            # attach woe level / lift metadata; tokens absent from the config get NaN
            token_df = token_df.merge(level_conf[['token', 'woe_level', 'lift', 'level_tag']], on='token', how='left')
            token_df['token_unique'] = token_df['token'].nunique()
            token_df['token_count_total'] = token_df['token_count'].sum()

            # flattened per-gram token table with basic stats attached
            self.sms_grams_code[i] = token_df

        self.user_sms = user_sms

    def time_grouped_feature(self, groupby):
        """Aggregate token stats per ``groupby`` column and time window.

        :param groupby: grouping column, 'level_tag' or 'woe_level'
        :return: flat dict {feature_name: value}; names encode gram size,
            group key, group value, statistic and time window.
        """
        result = {}
        for gram_i in range(1, self.n_gram + 1):
            level_conf = self.sms_token_level_conf[gram_i]
            token_df = self.sms_grams_code[gram_i]
            # only tokens that matched the config contribute to grouped stats
            calc_token_df = token_df[token_df['woe_level'].notnull()]
            total_body_count = len(self.user_sms)
            gram_token_unique = token_df['token'].nunique()
            gram_token_count_sum = token_df['token_count'].sum()
            # invariant across time windows — hoisted out of the window loop
            level_conf_grouped = level_conf.groupby(groupby).agg(
                level_token_unique=('token', 'nunique'))
            for time_wind in self.time_windows:
                token_grouped = calc_token_df[calc_token_df[time_wind] == 1].groupby(groupby).agg(
                    token_count=('token_count', 'sum'),
                    token_unique=('token', 'nunique'),
                    hit_order=('body_no', 'nunique')
                )
                token_grouped['order_ratio'] = token_grouped['hit_order'] / total_body_count
                token_grouped['token_count_weight'] = token_grouped['token_count'] / total_body_count
                token_grouped['token_frequency'] = token_grouped['token_count'] / gram_token_count_sum
                token_grouped['token_unique_rate'] = token_grouped['token_unique'] / gram_token_unique
                result_df = pd.concat([level_conf_grouped, token_grouped], axis=1)
                result_df['level_token_rate'] = result_df['token_unique'] / result_df['level_token_unique']
                # groups with no hits in this window become 0 rather than NaN
                result_df = result_df.fillna(0)
                for i, row in result_df.iterrows():
                    for col in ['token_count', 'token_unique', 'hit_order', 'order_ratio', 'token_count_weight',
                                'token_frequency', 'token_unique_rate', 'level_token_rate']:
                        feature_name = f'gram_{gram_i}_{groupby}_{i}_{col}_{time_wind}'
                        result[feature_name] = row[col]
        return result

    def time_statis_features(self):
        """Return the full feature dict across both grouping keys."""
        features = self.time_grouped_feature('level_tag')
        features.update(self.time_grouped_feature('woe_level'))
        return features














