from math import sqrt
import torch
import torch.nn as nn
from math import sin, cos
from config_class import BaseConfig


class TWIN_Embedding(nn.Module):
    """Shared feature embedding for TWIN.

    Embeds every inherent and cross feature of a behavior sequence, optionally
    adds sin/cos position and time encodings, and returns the inherent and
    cross parts separately so the GSU/ESU stages can attend over them
    independently.
    """

    def __init__(self,
                 vocab_size_dic,
                 id_embedding_dim=64,
                 simple_embedding_dim=8,
                 t_max=10000,
                 p_max=10000,
                 m_p=20,
                 m_t=20,
                 device='cpu'
                 ):
        """
        :param vocab_size_dic: dict mapping each behavior feature to its vocab
                               size, formatted as
                                   {
                                       'inherent_feature': {'item_id': vocab_num, 'author': vocab_num, ...},
                                       'cross_feature': {'play_time': vocab_num, 'date': vocab_num, ...}
                                   }
        :param id_embedding_dim: embedding dim for large-vocab ("id") features
        :param simple_embedding_dim: embedding dim for small-vocab features
        :param t_max: base of the time-encoding frequency term
        :param p_max: base of the position-encoding frequency term
        :param m_p: frequency divisor of the position encoding
        :param m_t: frequency divisor of the time encoding
        :param device: device the generated sin/cos encodings are moved to
        """
        super(TWIN_Embedding, self).__init__()
        self.vocab_size_dic = vocab_size_dic
        self.simple_embedding_dim = simple_embedding_dim
        self.id_embedding_dim = id_embedding_dim
        self.device = device
        vocab_num_list = []  # [(vocab_num, embedding_dim), ...]
        h = 0  # total inherent embedding width
        for feature, vocab_num in vocab_size_dic['inherent_feature'].items():
            # features whose name contains 'id' get the wide embedding
            if 'id' in feature:
                vocab_num_list.append((vocab_num, id_embedding_dim))
                h += id_embedding_dim
            else:
                vocab_num_list.append((vocab_num, simple_embedding_dim))
                h += simple_embedding_dim

        c = 0  # total cross embedding width

        for feature, vocab_num in vocab_size_dic['cross_feature'].items():
            if 'id' in feature:
                vocab_num_list.append((vocab_num, id_embedding_dim))
                c += id_embedding_dim
            else:
                vocab_num_list.append((vocab_num, simple_embedding_dim))
                c += simple_embedding_dim

        self.h = h
        self.c = c
        self.t_max = t_max
        self.p_max = p_max
        self.m_t = m_t
        self.m_p = m_p
        self.vocab_size_list = [vocab_num for vocab_num, _ in vocab_num_list]

        # one embedding table per feature, kept in feature order
        self.embed_layer_dic = nn.ModuleDict()
        for idx, (vocab_num, embed_dim) in enumerate(vocab_num_list):
            if embed_dim == id_embedding_dim:
                layer_name = 'item_id_embedding_{}'.format(idx)
            else:
                layer_name = str(idx)
            self.embed_layer_dic[layer_name] = nn.Embedding(vocab_num, embed_dim)

    def forward(self, s, timestamps=None):
        """
        :param s: (b, behaviors_num, behavior_feature_num) integer feature ids
        :param timestamps: optional (b, behaviors_num) behavior timestamps
        :return: (inherent_embed, cross_embed); cross_embed is None when the
                 model has no cross features
        """
        embed_list = []
        index = 0
        for embed_layer in self.embed_layer_dic.values():
            # slice out feature `index` and embed it
            # (renamed from `input` to avoid shadowing the builtin)
            feature_ids = s.narrow(-1, start=index, length=1)
            embed_list.append(embed_layer(feature_ids).squeeze(-2))
            index += 1
            if index >= s.shape[-1]:
                break
        final_embed = torch.cat(embed_list, dim=-1)
        if timestamps is not None:
            try:
                # transformer-style sin/cos position encoding, identical for
                # every sample in the batch
                p = torch.tensor([[sin(float(i) / pow(self.p_max, float(i) / self.m_p)) if i % 2 == 0 else
                              cos(float(i) / pow(self.p_max, float(i-1) / self.m_p))
                              for i in range(timestamps.shape[-1])]
                              for _ in range(timestamps.shape[0])]).unsqueeze(-1).float().to(self.device)

                # analogous encoding driven by the raw timestamp values
                t = torch.tensor([[sin(float(_j) / pow(self.t_max, float(i) / self.m_t)) if i % 2 == 0 else
                               cos(float(_j) / pow(self.t_max, float(i - 1) / self.m_t))
                               for i, _j in enumerate(j)] for j in timestamps]).unsqueeze(-1).float().to(self.device)
            except Exception:
                # was a bare `except:`; keep the best-effort fallback (no
                # position/time signal) but stop swallowing system exits
                p = 0
                t = 0
            final_embed = final_embed + p + t
        # no cross features -> single tensor, nothing to split
        if final_embed.shape[-1] == self.h or self.vocab_size_dic['cross_feature'] == {}:
            return final_embed, None
        return (final_embed.narrow(dim=-1,start=0,length=self.h),
                final_embed.narrow(dim=-1,start=self.h,length=self.c))

class ETA(nn.Module):
    """One attention head scoring behavior keys against a target-item query.

    The score is a scaled dot product between projected inherent embeddings,
    plus (when cross features exist) a learned combination of per-feature
    cross scores.
    """

    def __init__(self, h, c, simple_feature_dim=8):
        """
        :param h: total inherent embedding dim
        :param c: total cross embedding dim (0 disables the cross branch)
        :param simple_feature_dim: width of each individual cross-feature slice
        """
        super(ETA, self).__init__()
        self.simple_feature_dim = simple_feature_dim
        self.wh = nn.Linear(h, h)
        self.wq = nn.Linear(h, h)
        self.wcs = nn.ModuleList()
        if c != 0:
            self.j = c // simple_feature_dim
            self.wcs.extend(nn.Linear(simple_feature_dim, 1) for _ in range(self.j))
            self.beta = nn.Linear(self.j, 1)

    def forward(self, kh, kc, q):
        """
        :param kh: (b, s, h) inherent key embeddings
        :param kc: (b, s, c) cross key embeddings (ignored when unused)
        :param q: (b, h) query embedding of the target item
        :return: (b, s) attention logits
        """
        scale = sqrt(kh.shape[-1])
        keys = self.wh(kh)                       # b, s, h
        query = self.wq(q).unsqueeze(1)          # b, 1, h
        # scaled dot product: b,s,h x b,h,1 -> b,s,1
        logits = keys.bmm(query.transpose(1, 2)) / scale
        if self.wcs:
            slices = []
            offset = 0
            for wc in self.wcs:
                # each cross feature slice is scored independently: b,s,1
                slices.append(wc(kc.narrow(dim=-1, start=offset, length=self.simple_feature_dim)))
                offset += self.simple_feature_dim
            cross_scores = torch.cat(slices, dim=-1)  # b, s, j
            logits = logits + self.beta(cross_scores)
        return logits.squeeze(-1)  # b, s


class CP_GSU(nn.Module):
    """Cross-head General Search Unit.

    Scores the full behavior sequence with the shared multi-head ETA
    attention, merges the per-head scores with ``w_o``, and keeps the top-k
    behaviors per sample.
    """

    def __init__(self,
                 twin_embedding,
                 multi_head_etas,
                 w_o,
                 k = 100):
        """
        :param twin_embedding: shared TWIN_Embedding module
        :param multi_head_etas: iterable of per-head ETA scorers
        :param w_o: layer merging per-head scores (heads -> 1), shared with ESU
        :param k: number of behaviors to keep per sample
        """
        super().__init__()
        self.embedding = twin_embedding
        self.k = k
        self.multi_head_etas = multi_head_etas
        self.w_o = w_o
        self.heads = len(multi_head_etas)

    def forward(self, target_item, behaviors, timestamps=None):
        """
        :param target_item: (b, total_inherent_vocab) target-item feature ids
        :param behaviors: (b, s, total_cross_vocab) behavior feature ids
        :param timestamps: optional (b, s) behavior timestamps
        :return: top-k behaviors, plus their timestamps when given
        """
        kh, kc = self.embedding(behaviors, timestamps)  # kh: b,s,h  kc: b,s,c
        q, _ = self.embedding(target_item)              # b, h
        # one relevance score per head; each entry is b, 1, s
        alpha_list = [eta(kh, kc, q).unsqueeze(1) for eta in self.multi_head_etas]
        # b, heads, s -> b, s, heads via transpose. NOTE: the original used
        # reshape(b, -1, heads), which interleaves scores of different
        # heads/positions instead of transposing — that was a bug.
        stacked = torch.cat(alpha_list, dim=1).permute(0, 2, 1)
        alpha = self.w_o(stacked).squeeze(-1)           # b, s
        # batched top-k selection (replaces the per-sample Python loop)
        indices = torch.topk(alpha, k=self.k, dim=-1).indices  # b, k
        gather_idx = indices.unsqueeze(-1).expand(-1, -1, behaviors.size(-1))
        filter_behaviors = behaviors.gather(1, gather_idx)
        if timestamps is not None:
            return filter_behaviors, timestamps.gather(1, indices)
        return filter_behaviors

class GSU(nn.Module):
    """Simple General Search Unit: dot-product relevance + top-k filtering."""

    def __init__(self,twin_embedding, k = 100):
        """
        :param twin_embedding: shared TWIN_Embedding module
        :param k: number of behaviors to keep per sample
        """
        super().__init__()
        self.embedding = twin_embedding
        self.k = k

    def forward(self,target_item, behaviors, timestamps=None):
        """
        :param target_item: (b, total_inherent_vocab) target-item feature ids
        :param behaviors: (b, s, total_cross_vocab) behavior feature ids
        :param timestamps: optional (b, s) behavior timestamps
        :return: top-k behaviors, plus their timestamps when given
        """
        kh, kc = self.embedding(behaviors, timestamps)  # kh: b,s,h  kc: b,s,c or None
        q, _ = self.embedding(target_item)              # b, h
        # inner-product relevance; cross features contribute their mean
        alpha = torch.bmm(kh, q.unsqueeze(-1))          # b, s, 1
        if kc is not None:
            alpha = alpha + torch.mean(kc, dim=-1, keepdim=True)
        alpha = alpha.squeeze(-1)                       # b, s
        # batched top-k + gather (replaces the per-sample Python loop, which
        # was O(b) Python-level work with duplicated branches)
        indices = torch.topk(alpha, k=self.k, dim=-1).indices  # b, k
        gather_idx = indices.unsqueeze(-1).expand(-1, -1, behaviors.size(-1))
        filter_behaviors = behaviors.gather(1, gather_idx)
        if timestamps is not None:
            return filter_behaviors, timestamps.gather(1, indices)
        return filter_behaviors


class ESU(nn.Module):
    """Exact Search Unit: multi-head attention over the filtered behaviors."""

    def __init__(self,
                 twin_embedding,
                 multi_head_etas,
                 w_o,
                 out_channel=30):
        """
        :param twin_embedding: shared TWIN_Embedding module (provides h and c)
        :param multi_head_etas: iterable of per-head ETA scorers
        :param w_o: layer merging heads (heads -> 1), shared with the GSU
        :param out_channel: output dim of the per-head value projection
        """
        super().__init__()
        self.embedding = twin_embedding
        self.out_channel = out_channel
        self.multi_head_etas = multi_head_etas
        self.w_v = nn.Linear(self.embedding.h + self.embedding.c, out_channel)
        self.w_o = w_o
        self.softmax = nn.Softmax(dim=-1)
        self.heads = len(multi_head_etas)

    def forward(self, target_item, filter_behaviors, filter_timestamps=None):
        """
        :param target_item: (b, total_inherent_vocab) target-item feature ids
        :param filter_behaviors: (b, k, total_cross_vocab) top-k behaviors
        :param filter_timestamps: optional (b, k) timestamps of those behaviors
        :return: (b, out_channel) fused interest representation
        """
        q, _ = self.embedding(target_item)
        kh, kc = self.embedding(filter_behaviors, filter_timestamps)
        if kc is not None:
            K = torch.cat((kh, kc), dim=-1)  # b, k, h+c
        else:
            K = kh
        # project values once, outside the head loop (the original recomputed
        # w_v(K) for every head)
        values = self.w_v(K)                 # b, k, out_channel
        head_list = []
        for eta in self.multi_head_etas:
            attn = self.softmax(eta(kh, kc, q)).unsqueeze(1)  # b, 1, k
            head_list.append(attn.bmm(values))                # b, 1, out_channel
        # b, heads, out_channel -> b, out_channel, heads via transpose.
        # NOTE: the original used reshape(-1, out_channel, heads), which
        # interleaves head and channel entries instead of transposing — bug.
        stacked = torch.cat(head_list, dim=1).permute(0, 2, 1)
        twin = self.w_o(stacked).squeeze(-1)  # b, out_channel
        return twin

class ESU_MLP(nn.Module):
    """Prediction head on top of an ESU.

    Concatenates the ESU interest vector with the raw embeddings of the most
    recent (short-term) behaviors and runs an MLP producing 2-class
    probabilities.
    """

    def __init__(self,esu, short_time):
        """
        :param esu: ESU module (also supplies the shared embedding)
        :param short_time: number of most-recent behaviors fed in directly
        """
        super().__init__()
        self.esu = esu
        self.behavior_embedding_dim = esu.embedding.h + esu.embedding.c
        self.short_time = short_time
        mlp_in = esu.out_channel + short_time * self.behavior_embedding_dim
        self.mlp = nn.Sequential(
            nn.Linear(mlp_in, 128),
            nn.ReLU(),
            nn.Linear(128, 8),
            nn.ReLU(),
            nn.Linear(8, 2),
            nn.Softmax(dim=-1),
        )

    def forward(self, target_item, filter_behaviors, all_behaviors,filter_timestamps=None, timestamps=None):
        """
        :param target_item: (b, total_inherent_vocab) target-item feature ids
        :param filter_behaviors: (b, k, total_cross_vocab) GSU-filtered behaviors
        :param all_behaviors: (b, s, total_cross_vocab) full behavior sequence
        :param filter_timestamps: optional timestamps of the filtered behaviors
        :param timestamps: optional timestamps of the full sequence
        :return: (b, 2) class probabilities
        """
        batch = target_item.shape[0]
        recent = all_behaviors[:, :self.short_time, :]
        if timestamps is None:
            short_kh, short_kc = self.esu.embedding(recent)
        else:
            short_kh, short_kc = self.esu.embedding(recent, timestamps[:, :self.short_time])
        if short_kc is None:
            short_embed = short_kh.reshape(batch, -1)
        else:
            short_embed = torch.cat((short_kh, short_kc), dim=-1).reshape(batch, -1)
        twin = self.esu(target_item, filter_behaviors, filter_timestamps)
        return self.mlp(torch.cat((twin, short_embed), dim=-1))


def get_multi_head_etas(heads, h, c, simple_embedding_dim):
    """Build one ETA scorer per attention head, collected in a ModuleList."""
    return nn.ModuleList(ETA(h, c, simple_embedding_dim) for _ in range(heads))


def get_twin_embedding(vocab_size_dic,id_embedding_dim=64,simple_embedding_dim=8, device='cpu'):
    """Factory for the shared TWIN_Embedding module."""
    return TWIN_Embedding(vocab_size_dic,
                          id_embedding_dim=id_embedding_dim,
                          simple_embedding_dim=simple_embedding_dim,
                          device=device)


def get_w_o(heads):
    """Linear layer merging per-head attention scores into a single score."""
    merge = nn.Linear(heads, 1)
    return merge

class TWIN(nn.Module):
    """End-to-end TWIN model: CP-GSU top-k filtering followed by ESU + MLP."""

    def __init__(self,config:BaseConfig):
        """
        :param config: BaseConfig carrying vocab sizes, dims, heads, k, etc.
        """
        super().__init__()
        # every sub-module shares a single embedding table
        self.twin_embedding = get_twin_embedding(
            config.vocab_size_dic,
            id_embedding_dim=config.id_embed_dim,
            simple_embedding_dim=config.simple_embed_dim)
        self.multi_head_eta = get_multi_head_etas(
            config.heads,
            self.twin_embedding.h,
            self.twin_embedding.c,
            self.twin_embedding.simple_embedding_dim)
        # the head-merging layer is shared between GSU and ESU, so the GSU
        # scoring path still receives gradient updates through the ESU loss
        self.w_o = get_w_o(config.heads)
        self.gsu = CP_GSU(self.twin_embedding, self.multi_head_eta, self.w_o, k=config.k)
        self.esu = ESU(self.twin_embedding, self.multi_head_eta, self.w_o, out_channel=config.out_channel)
        self.esu_mlp = ESU_MLP(self.esu, short_time=config.short_time)

    def forward(self,target_item, behaviors):
        """
        :param target_item: (b, total_inherent_vocab) target-item feature ids
        :param behaviors: (b, s, total_cross_vocab) behavior feature ids
        :return: (b, 2) class probabilities
        """
        top_behaviors = self.gsu(target_item, behaviors)
        probs = self.esu_mlp(target_item, top_behaviors,
                             all_behaviors=behaviors).float()
        return probs



def _demo():
    """Run a quick forward-pass smoke test of the TWIN model."""
    # two inherent features, no cross features
    vocab_size_dic = {
        "inherent_feature": {
            "item_id": 5000 + 1,
            "item_category": 5000 + 1,
        },
        "cross_feature": {},
    }
    behaviors = torch.randint(0, 5000, (10, 200, 2)).long()
    target_item = torch.ones(10, 2).long()

    heads = 4
    k = 100
    config = BaseConfig(
        vocab_size_dic,
        id_embed_dim=10,
        simple_embed_dim=2,
        k=k,
        seq_len=200,
    )
    model = TWIN(config)
    pred_y = model(target_item, behaviors)
    print(pred_y)


if __name__ == '__main__':
    _demo()
