"""
模型
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import random

import numpy as np
from scipy.stats import rankdata
import torch
import torch.nn as nn
from collections import defaultdict

from dynamic_pricing.common.recorder import cur_time, dur_dist
from dynamic_pricing.model.rayleigh_proc import ReyleighProc
from dynamic_pricing.common.neg_sampler import rand_sampler
from dynamic_pricing.common.pytorch_utils import SpEmbedding


# Model definition.
class DeepCoevolve(nn.Module):
    """
        模型
    """

    def __init__(self, num_users, num_items, embed_size, k, user_type_dic,
                 score_func='log_ll', dt_type='last', max_norm=None):
        """
        Build the co-evolving embedding model.

        :param num_users: number of distinct users
        :param num_items: number of distinct items
        :param embed_size: dimensionality of every embedding vector
        :param k: number of user types
        :param user_type_dic: mapping user_id -> type_id
        :param score_func: test-time ranking score ('log_ll' / 'comp' / 'intensity')
        :param dt_type: reference time for the delta-t term ('last' / 'cur')
        :param max_norm: optional maximum norm for embedding renormalisation
        """
        super(DeepCoevolve, self).__init__()

        self.num_users = num_users  # e.g. 584399
        self.num_items = num_items
        self.embed_size = embed_size
        self.k = k
        self.user_type_dic = user_type_dic
        self.score_func = score_func
        self.dt_type = dt_type
        self.max_norm = max_norm

        # Per-user interaction counters.
        self.user_events_list = [0] * num_users
        # type_id -> list of users of that type observed so far.
        self.type_user_events = defaultdict(list)
        # Mixing weight for the (currently disabled) user/type fusion variants.
        self.user_fuse_rate = 0.5

        # The user table carries k extra rows so that type vectors can be
        # stored at offset num_users (see _get_embedding).
        self.user_embedding = SpEmbedding(num_users + k, embed_size)
        self.item_embedding = SpEmbedding(num_items, embed_size)

        # Frozen, zero-initialised type embeddings (kept for experiments).
        self.type_embedding = SpEmbedding(k, embed_size, is_zero=True)
        self.type_embedding.requires_grad_(False)

        # Per-pass caches of freshly evolved embeddings.
        self.user_lookup_embed = {}
        self.item_lookup_embed = {}

        # Scratch buffer: time gap to every item at test time.
        self.delta_t = np.zeros((self.num_items,), dtype=np.float32)

        # Fuses same-type users into a single type representation.
        self.type_embed_cell = nn.RNNCell(embed_size, embed_size)

        self.user_cell = nn.GRUCell(embed_size, embed_size)
        self.item_cell = nn.GRUCell(embed_size, embed_size)
        self.type_cell = nn.GRUCell(embed_size, embed_size)

        self.user_fuse_cell = nn.Sequential(
            nn.Linear(2 * embed_size, embed_size),
            nn.ReLU(inplace=True)
        )

        # Snapshots of the full embedding matrices, filled during testing.
        self.updated_user_embed = None
        self.updated_item_embed = None

    def reset_user_events_list(self):
        """Zero out the per-user event counters."""
        self.user_events_list = [0] * self.num_users

    def normalize(self):
        """
        Renormalise the user and item embedding tables to ``max_norm``.

        No-op when ``max_norm`` is unset.
        """
        if self.max_norm is not None:
            for table in (self.user_embedding, self.item_embedding):
                table.normalize(self.max_norm)

    def _get_embedding(self, side, idx, lookup):
        """
        Fetch (and cache) the current embedding of one entity.

        :param side: 'user', 'item' or 'type'; type vectors live in the
                     user table at offset ``num_users``
        :param idx: entity id (shifted by ``num_users`` for types)
        :param lookup: cache dict mapping id -> embedding tensor
        :return: the cached or freshly looked-up embedding
        """
        if side == 'type':
            # Type rows are stored in the tail of the user embedding table.
            idx += self.num_users
        if idx not in lookup:
            table = self.item_embedding if side == 'item' else self.user_embedding
            lookup[idx] = table([idx])
        return lookup[idx]

    def get_cur_user_embed(self, user):
        """Current embedding of ``user``, cached in ``user_lookup_embed``."""
        return self._get_embedding('user', user, self.user_lookup_embed)

    def get_cur_item_embed(self, item):
        """Current embedding of ``item``, cached in ``item_lookup_embed``."""
        return self._get_embedding('item', item, self.item_lookup_embed)

    def get_cur_type_embed(self, type_id):
        """
        Current embedding of a user type.

        Intentionally shares the user cache: type rows sit at offset
        ``num_users`` in the user embedding table.
        """
        return self._get_embedding('type', type_id, self.user_lookup_embed)

    def get_pred_score(self, comp, delta_t):
        """

        :param comp:
        :param delta_t:
        :return:
        """
        # log_ll激活函数
        if self.score_func == 'log_ll':
            d_t = np.clip(delta_t, a_min=1e-10, a_max=None)
            return np.log(d_t) + np.log(comp) - 0.5 * comp * (d_t ** 2)
        elif self.score_func == 'comp':
            return comp
        elif self.score_func == 'intensity':
            return comp * delta_t
        else:
            raise NotImplementedError

    def get_type_id(self, user_id):
        """Type id of ``user_id``, looked up in ``user_type_dic``."""
        return self.user_type_dic[user_id]

    def fuse_user_and_type_embed(self, user_id):
        """
        Embedding used for the user of the current event.

        Several strategies for fusing the user embedding with its type
        embedding (linear layer over the concatenation, weighted mean
        pooling, max pooling) were experimented with here and are currently
        disabled; the plain user embedding is returned.

        :param user_id: id of the user
        :return: the user's current embedding
        """
        return self.get_cur_user_embed(user_id)

    def generate_zeros_embed(self, dim=None):
        """
            生成一个全零 tensor的嵌入
        :param dim: 需要生成向量的维度
        :return: 全零嵌入
        """
        # 默认和节点嵌入维度一样
        if dim is None:
            dim = self.embed_size
        # 生成0向量
        zero_tensor = torch.zeros(1, dim, dtype=torch.float32)

        return zero_tensor

    def get_test_user_embed(self, user_id):
        """

        :param user_id:
        :return:
        """
        # 是否启用冷启动
        # True表示启用，则考虑用类型来初始化新用户
        # False表示不启用，就是普通的coe
        is_cold_start = True
        # 表示是否老用户也需要考虑周围邻居
        # True表示老用户的嵌入也考虑同类user的影响
        is_include_old_user = True

        # 关闭冷启动
        if not is_cold_start:
            return self.get_cur_user_embed(user_id)

        # elif self.user_events_list[user_id] < 1:
        else:
            # 如果老用户不考虑同类user影响，且当前用户是老用户，则直接返回user嵌入
            if not is_include_old_user and self.user_events_list[user_id] > 0:
                return self.get_cur_user_embed(user_id)

            type_id = self.get_type_id(user_id)
            users = self.type_user_events[type_id]  # 同类用户
            type_embed = self.generate_zeros_embed()  # 类别嵌入
            len_cur_type = len(users)

            # 没有同类用户，则直接返回当前嵌入
            if len_cur_type == 0:
                return self.get_cur_user_embed(user_id)
            k = 1000
            # 同类型user数量大于k，就随机取k个
            if len_cur_type > k:
                user_sample_index = random.sample(range(0, len_cur_type), k)
            else:
                user_sample_index = range(len_cur_type)
            # 融合邻居的嵌入，得到当前特征嵌入
            for user_index in user_sample_index:
                user = users[user_index]
                cur_user_emb = self.get_cur_user_embed(user)
                type_embed += cur_user_emb
            type_embed /= len(user_sample_index)

            # 如果是老用户，则特征嵌入和当前user嵌入各占50%
            if self.user_events_list[user_id] > 0:
                type_embed = (type_embed + self.get_cur_user_embed(user_id)) / 2

            return type_embed

    def get_output(self, cur_event, phase, HR_k):
        """
        Score a single interaction event.

        In 'train' phase returns the negative log base-compatibility plus the
        negative-sampling survival term; in 'test' phase returns the hit
        indicators for each cut-off in ``HR_k`` and the rank of the true item.

        :param HR_k: hit-rate cut-offs, e.g. [1, 2, 3, 4, 5, 10, 20]
        :param cur_event: event with fields .user, .item and timestamp .t
        :param phase: 'train' or 'test'
        :return: (loss, 0) when training; (hit list, rank) when testing
        """
        # Embeddings of the item/user taking part in this event.
        cur_item_embed = self.get_cur_item_embed(cur_event.item)
        cur_user_embed = self.fuse_user_and_type_embed(cur_event.user)

        # Base compatibility term of the Rayleigh intensity function.
        base_comp = ReyleighProc.base_compatibility(cur_user_embed, cur_item_embed)

        # Time elapsed since this (user, item) pair last interacted,
        # recorded into the global duration statistics.
        dur = cur_event.t - cur_time.get_cur_time(cur_event.user, cur_event.item)
        dur_dist.add_time(dur)

        if phase == 'test':
            # Cold-start-aware user embedding for prediction.
            cur_user_embed = self.get_test_user_embed(cur_event.user)

            # Compatibility of this user against every item, as a numpy vector.
            comp = ReyleighProc.base_compatibility(cur_user_embed, self.updated_item_embed).view(-1).cpu().data.numpy()
            # Time gap to each item (the survival term of the score).
            for i in range(self.num_items):
                prev = (cur_time.get_last_interact_time(cur_event.user, i)
                        if self.dt_type == 'last'
                        else cur_time.get_cur_time(cur_event.user, i))
                self.delta_t[i] = cur_event.t - prev
            # Ranking score per item.
            scores = self.get_pred_score(comp, self.delta_t)
            # rankdata assigns rank 1 to the best (highest) score.
            ranks = rankdata(-scores)
            mar = ranks[cur_event.item]
            # Hit@K: the true item is a hit when its rank is within K.
            # Fixed off-by-one: the previous `mar < HR` made Hit@1 unreachable,
            # because rankdata's ranks start at 1.
            pre_result = [1 if mar <= HR else 0 for HR in HR_k]
            return pre_result, mar

        # Training: survival term estimated with sampled negative users/items.
        neg_users = rand_sampler.sample_neg_users(cur_event.user, cur_event.item)
        neg_items = rand_sampler.sample_neg_items(cur_event.user, cur_event.item)

        neg_users_embeddings = self.user_embedding(neg_users)
        neg_items_embeddings = self.item_embedding(neg_items)
        # Prefer freshly co-evolved embeddings over the static tables.
        for u_idx, u in enumerate(neg_users):
            if u in self.user_lookup_embed:
                neg_users_embeddings[u_idx] = self.user_lookup_embed[u]
        for i_idx, it in enumerate(neg_items):
            if it in self.item_lookup_embed:
                neg_items_embeddings[i_idx] = self.item_lookup_embed[it]

        survival = ReyleighProc.survival(cur_event.user, cur_user_embed,
                                         cur_event.item, cur_item_embed,
                                         neg_users_embeddings, neg_users,
                                         neg_items_embeddings, neg_items,
                                         cur_event.t)
        loss = -torch.log(base_comp) + survival
        return loss, 0

    def forward(self, T_begin, events, phase, HR):
        """
        Replay a chronologically ordered event sequence, co-evolving the
        user and item embeddings after every event.

        :param T_begin: timestamp at which this window starts
        :param events: ordered events, each with .user, .item and .t
        :param phase: 'train' or 'test'
        :param HR: unused here; the cut-offs used are the local ``HR_k``
        :return: mean loss in 'train' phase; in 'test' phase a tuple of
                 (mean rank, per-bucket hit counts, per-bucket event counts)
        """
        if phase == 'train':
            cur_time.reset(T_begin)

        # Print a histogram of historical purchase counts over test users.
        if phase == 'test':
            user_events_num_dic = dict()
            for i in range(len(self.user_events_list)):
                if self.user_events_list[i] > 0:
                    if self.user_events_list[i] in user_events_num_dic:
                        user_events_num_dic[self.user_events_list[i]] += 1
                    else:
                        user_events_num_dic[self.user_events_list[i]] = 1
                    # print(i, '->', self.user_events_list[i], end='  ')
            for key, value in user_events_num_dic.items():
                print(key, ':', value)
            print()

        # Fresh per-pass embedding caches.
        self.user_lookup_embed = {}
        self.item_lookup_embed = {}

        # Gradients are only tracked during training.
        with torch.set_grad_enabled(phase == 'train'):
            if phase == 'test':
                # Snapshot the full embedding matrices; rows are overwritten
                # as events evolve the embeddings below.
                self.updated_user_embed = self.user_embedding.weight.clone()
                self.updated_item_embed = self.item_embedding.weight.clone()
                # self.updated_type_embed = self.type_embedding.weight.clone()

            # Activity buckets: each event is credited to the first bucket
            # whose threshold its user's event count exceeds.
            a = [10, 3, 0, -1]
            HR_k = [1, 2, 3, 4, 5, 10, 20]
            if phase == 'test':
                loss = [[0 for _ in HR_k] for _ in a]  # hit counts per bucket / cut-off
                num_total = [0 for _ in a]  # events per bucket
                mae = 0  # NOTE(review): accumulates ranks (a mean-average-rank), not an absolute error
            else:
                loss = 0.0
            # mae = 0.0
            # mse = 0.0
            pbar = enumerate(events)

            for e_idx, cur_event in pbar:
                assert cur_event.t >= T_begin

                if phase == 'train':
                    # Remember which users belong to each type (used by the
                    # cold-start pooling in get_test_user_embed).
                    type_id = self.get_type_id(cur_event.user)
                    if type_id in self.type_user_events:
                        self.type_user_events[type_id].append(cur_event.user)
                    else:
                        self.type_user_events[type_id] = [cur_event.user]

                # cur_loss, cur_mae, cur_mse = self.get_output(cur_event, phase)
                cur_loss, cur_mar = self.get_output(cur_event, phase, HR_k)
                if phase == 'test':
                    n = self.user_events_list[cur_event.user]
                    for a_i in range(len(a)):
                        if n > a[a_i]:
                            for loss_idx in range(len(loss[a_i])):
                                loss[a_i][loss_idx] += cur_loss[loss_idx]
                            # loss[a_i] += cur_loss
                            num_total[a_i] += 1
                            break
                    mae += cur_mar
                else:
                    loss += cur_loss
                # mae += cur_mae
                # mse += cur_mse
                # The last event needs no embedding update.
                if e_idx + 1 == len(events):
                    break

                cur_user_embed = self.user_lookup_embed[cur_event.user]
                cur_item_embed = self.item_lookup_embed[cur_event.item]

                cur_user_type = self.get_type_id(cur_event.user)
                # cur_type_embed = self.user_lookup_embed[cur_user_type + self.num_users]

                # Co-evolution step: each side is updated with the other's embedding.
                self.user_lookup_embed[cur_event.user] = self.user_cell(cur_item_embed, cur_user_embed)
                self.item_lookup_embed[cur_event.item] = self.item_cell(cur_user_embed, cur_item_embed)
                # self.user_lookup_embed[cur_user_type + self.num_users] = self.type_cell(cur_user_embed, cur_type_embed)
                # self.type_lookup_embed[cur_user_type] = self.type_cell(cur_user_embed, cur_type_embed)

                if phase == 'test':  # update embeddings into the embed mat
                    self.updated_user_embed[cur_event.user] = self.user_lookup_embed[cur_event.user]
                    self.updated_item_embed[cur_event.item] = self.item_lookup_embed[cur_event.item]
                    # self.updated_user_embed[cur_user_type + self.num_users] = self.user_lookup_embed[
                    #     cur_user_type + self.num_users]
                    # self.updated_type_embed[cur_user_type] = self.type_lookup_embed[cur_type_embed][0]

                cur_time.update_event(cur_event.user, cur_event.item, cur_event.t)
                if phase == 'train':
                    self.user_events_list[cur_event.user] += 1
            # rmse = torch.sqrt(mse / len(events)).item()
            # mae = mae.item() / len(events)
            torch.set_grad_enabled(True)
            # if phase == 'train':
            #     return loss / len(events), mae, rmse
            # else:
            #     return loss, mae, rmse

            if phase == 'train':
                return loss / len(events)
            else:
                return round(mae / len(events), 2), loss, num_total