import bisect

import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import IterableDataset

from .shift import Shift

def lower_bound(nums, target):
    """Return the index of the first element in *nums* that is >= *target*.

    *nums* must be sorted in ascending order.  If every element is smaller
    than *target* (or *nums* is empty), ``len(nums)`` is returned.
    """
    # bisect_left implements exactly this contract in C and — unlike the
    # previous hand-rolled binary search — does not raise IndexError when
    # nums is empty.
    return bisect.bisect_left(nums, target)
class ByteDance(IterableDataset):
    """Iterable dataset over a ByteDance short-video interaction log.

    Each yielded sample couples pre-extracted per-item features
    (audio / face / time / title / video / context) with the user's
    recent click-behavior sequence and the (finish, like) labels.
    """

    def __init__(self, cfg: dict, mode: str = "train") -> None:
        # Fix: the original called ``super(ByteDance).__init__()``, which
        # builds an *unbound* super object and never runs
        # ``IterableDataset.__init__``.
        super().__init__()
        if mode == "train":
            self.path = cfg.get('Base').get('datasets_train_path')
        else:
            self.path = cfg.get('Base').get('datasets_valid_path')
        baseCfg = cfg.get('Base')
        self.audio_feature = np.load(baseCfg.get('audio_feature'))
        # The behavior_sequence_* files are pickled objects wrapping
        # {uid: sequence} mappings — hence allow_pickle and the ``.item()``
        # calls in __map_behavior.
        self.behavior_sequence_itemid = np.load(baseCfg.get('behavior_sequence_itemid'), allow_pickle=True)
        self.behavior_sequence_createtime = np.load(baseCfg.get('behavior_sequence_createtime'), allow_pickle=True)
        context_field = np.load(baseCfg.get('context_field'))  # columns: ['author_id', 'item_city', 'channel', 'music_id', 'create_time', 'video_duration']
        self.face_feature = np.load(baseCfg.get('face_feature'))
        time_feature = np.load(baseCfg.get('time_feature'))
        self.title_feature = np.load(baseCfg.get('title_feature'))[:, :, 1]
        self.video_feature = np.load(baseCfg.get('video_feature'))

        self.behavior_sequence_len = 30  # keep the 30 most recent clicks
        self.context_field_discrete_num = 4  # number of discrete context fields
        self.context_field_time_num = 12  # number of publish-time features
        self.context_field_duration_num = 1  # video duration is handled separately

        # Video duration: clip values more than 3 standard deviations above
        # the mean to drop extreme outliers before scaling.
        context_field_duration = context_field[:, -1:]
        duration_mean = np.mean(context_field_duration, keepdims=False)
        duration_std = np.std(context_field_duration, keepdims=False)
        context_field_duration = np.clip(context_field_duration, 0, int(duration_mean + 3 * duration_std))
        # Discrete context columns: ['author_id', 'item_city', 'channel', 'music_id']
        self.context_field_discrete = context_field[:, :-2]

        # Min-max normalise the dense features to [0, 1].  fit_transform
        # refits the scaler on each call, so reusing one instance is safe.
        min_max_scaler = MinMaxScaler()
        self.time_feature = min_max_scaler.fit_transform(time_feature)
        self.context_field_duration = min_max_scaler.fit_transform(context_field_duration)

    def __map_feature(self, item_id):
        """Return the per-item feature tensors for *item_id*.

        Returns a 7-tuple: (audio, face, time, title, video, discrete
        context, scaled duration).  All are float tensors except the
        discrete context, which keeps its integer dtype for embedding
        lookups.
        """
        return torch.from_numpy(self.audio_feature[item_id]).float(), \
            torch.from_numpy(self.face_feature[item_id]).float(), \
            torch.from_numpy(self.time_feature[item_id]).float(), \
            torch.from_numpy(self.title_feature[item_id]).float(), \
            torch.from_numpy(self.video_feature[item_id]).float(), \
            torch.from_numpy(self.context_field_discrete[item_id]), \
            torch.from_numpy(self.context_field_duration[item_id]).float()

    def __map_behavior(self, uid, createTime):
        """Build the user's click-behavior sequence before *createTime*.

        Keeps the clicks that happened before the current item's create
        time, truncated to the most recent ``behavior_sequence_len`` and
        left-padded with item id 0 (0 marks an invalid/padding slot).
        Returns a tuple of (discrete, duration, time) tensors, each of
        shape (behavior_sequence_len, field_count).
        """
        item_ids = self.behavior_sequence_itemid.item()[uid]
        # Materialize once instead of twice (length check + lower_bound).
        create_time_sequence = list(self.behavior_sequence_createtime.item()[uid])
        if len(create_time_sequence) == 0:
            item_sequence = []
        else:
            # Index of the first click at-or-after createTime; everything
            # before it is usable history.
            index = lower_bound(create_time_sequence, createTime)
            item_sequence = item_ids[:index]
        if len(item_sequence) > self.behavior_sequence_len:
            item_sequence = item_sequence[-self.behavior_sequence_len:]
        else:
            # Left-pad with zeros.  Assumes item_ids is a plain Python list
            # so ``+`` concatenates — TODO(review): confirm the pickled
            # sequences are lists, not ndarrays.
            item_sequence = (self.behavior_sequence_len - len(item_sequence)) * [0] + item_sequence

        assert len(item_sequence) == self.behavior_sequence_len

        behavior_discrete_tensor = torch.zeros((self.behavior_sequence_len, self.context_field_discrete_num), dtype=torch.int64)
        behavior_duration_tensor = torch.zeros((self.behavior_sequence_len, self.context_field_duration_num), dtype=torch.float32)
        behavior_time_tensor = torch.zeros((self.behavior_sequence_len, self.context_field_time_num), dtype=torch.float32)

        for idx, itemid in enumerate(item_sequence):
            if itemid != 0:  # 0 is the padding id — leave those rows zeroed
                behavior_discrete_tensor[idx, :] = torch.from_numpy(self.context_field_discrete[itemid])
                behavior_duration_tensor[idx, :] = torch.from_numpy(self.context_field_duration[itemid])
                behavior_time_tensor[idx, :] = torch.from_numpy(self.time_feature[itemid]).float()

        return (behavior_discrete_tensor, behavior_duration_tensor, behavior_time_tensor)

    def __uid_itemid(self, line):
        """Return a length-2 int64 tensor (uid, item_id) from a log row."""
        u_item_id_arr = np.array([line[0], line[2]], dtype=np.int64)
        return torch.from_numpy(u_item_id_arr)

    def __finish_like(self, line):
        """Return a length-2 int64 tensor of the (finish, like) labels."""
        finish_like_arr = np.array([line[6], line[7]], dtype=np.int64)
        return torch.from_numpy(finish_like_arr)

    def __iter__(self):
        """Stream samples from the tab-separated interaction log at ``self.path``."""
        with open(self.path, 'r') as f:
            for line in f:
                line_data = list(map(int, line.strip().split('\t')))
                # The first six fields are ids/categories where 0 would mean
                # "missing" — treat that as corrupt input.
                for i in line_data[:6]:
                    assert i != 0, "error{}".format(line_data)

                u_item = self.__uid_itemid(line_data)
                finish_like = self.__finish_like(line_data)
                audio_feature, face_feature, time_feature, title_feature, video_feature, context_field_discrete, context_field_scaler = self.__map_feature(line_data[2])
                # line_data[10] is presumably the current item's create_time
                # (cut-off for the behavior history) — verify against the
                # log schema.
                behavior_tensor = self.__map_behavior(line_data[0], line_data[10])
                yield u_item, audio_feature, face_feature, time_feature, title_feature, video_feature, context_field_scaler, context_field_discrete, behavior_tensor, finish_like


