from operator import mod
from time import pthread_getcpuclockid
from typing import List, Tuple
import torch
from torch import nn
from torch.nn import Module
from .transformer import Transformer, Cross_Transformer
from .interest_match import InterestMatch
from .hgru import MGRU
from .embedding import EmbeddingLayer

class MHT(Module):
    """Multi-modal CTR model.

    Fuses per-item modality features (audio / video / face / title) with
    context features and the user's behavior history via self- and
    cross-attention transformers plus an interest-matching GRU branch, and
    predicts a like/click probability through a small MLP + sigmoid head.
    """

    def __init__(self, cfg, device, mode="train"):
        """Build the model from a config object.

        Args:
            cfg: config with a "Train" section supplying all dimensions
                (accessed via ``cfg.get("Train").get(key)``).
            device: torch device the submodules are moved to.
            mode: run mode tag ("train" by default); stored but not read
                anywhere else in this class.
        """
        super().__init__()
        self.mode = mode
        self.emb = EmbeddingLayer(cfg, device=device).to(device)
        trainCfg = cfg.get("Train")

        # Per-modality input projections into a shared `modality_dim` space.
        # NOTE(fix): these used to live in a plain Python dict, which hid the
        # Linear layers from parameters()/state_dict()/.to()/.train(); an
        # nn.ModuleDict registers them properly as submodules.
        modality_dims = {
            "audio": trainCfg.get("audio_origin_dim"),
            "video": trainCfg.get("video_origin_dim"),
            "face": trainCfg.get("face_origin_dim"),
            "title": trainCfg.get("title_origin_dim"),
        }
        self.modality_map_lin = nn.ModuleDict({
            name: nn.Linear(in_features=dim, out_features=trainCfg.get("modality_dim"))
            for name, dim in modality_dims.items()
        }).to(device)

        # Context projections into a shared `context_dim` space; the same
        # layers are reused for the behavior-history counterparts in forward().
        context_dims = {
            "time": trainCfg.get("context_time_origin_dim"),
            "scaler": trainCfg.get("context_scaler_origin_dim"),
            "discrete": trainCfg.get("context_discrete_dim"),
        }
        self.context_map_lin = nn.ModuleDict({
            name: nn.Linear(in_features=dim, out_features=trainCfg.get("context_dim"))
            for name, dim in context_dims.items()
        }).to(device)

        # NOTE: the misspelled "tranformer" attribute names are kept on
        # purpose so state_dict keys of existing checkpoints still match.
        self.tranformer = Transformer(dim=trainCfg.get("modality_dim"), depth=2, heads=8, dim_head=16, mlp_dim=128, dropout=0.5)
        self.cross_tranformer = Cross_Transformer(dim=trainCfg.get("modality_dim"), depth=2, heads=8, dim_head=16, mlp_dim=128, dropout=0.5)
        self.out_tranformer = Transformer(dim=trainCfg.get("modality_dim"), depth=2, heads=8, dim_head=16, mlp_dim=128, dropout=0.5)
        # Learnable [global] token prepended to the modality sequence.
        self.global_token = nn.Parameter(torch.zeros(trainCfg.get("modality_dim")))
        # Projects the concatenated context vector into 4 context tokens of
        # width 128 each (reshaped in forward()).
        self.Y_pre_lin = nn.Linear(in_features=trainCfg.get("context_dim") * 6, out_features=128 * 4)

        # Interest branch: match behavior history against the user, run it
        # through a multi-block GRU, then match the output back.
        self.im = InterestMatch(dim=trainCfg.get("context_dim"), out_dim=trainCfg.get("mgru_input_dim"))
        self.mgru = MGRU(input_size=trainCfg.get("mgru_input_dim"), hidden_size=trainCfg.get("mgru_hidden_dim"), n_layers=trainCfg.get("mgru_n_layers"), block_size=trainCfg.get("mgru_n_blocks"))
        self.im_o = InterestMatch(dim=trainCfg.get("mgru_hidden_dim"), out_dim=trainCfg.get("im_out_dim"))

        # CTR head. A wider variant that also consumed the interest features
        # (+ im_out_dim input features) is currently disabled — see the
        # matching disabled concat in forward().
        self.like_lin0 = nn.Linear(trainCfg.get("modality_dim") + trainCfg.get("uid_embedding_size") + trainCfg.get("item_id_embedding_size"), trainCfg.get("ctr_mlp_out_dim"))
        self.like_lin1 = nn.Linear(trainCfg.get("ctr_mlp_out_dim"), 1)

        self.bce_loss = nn.BCELoss(reduction="mean")

    def forward(self, u_item_id: torch.Tensor, data_content: Tuple[torch.Tensor, ...], data_behavior: Tuple[torch.Tensor, ...]) -> torch.Tensor:
        """Predict a like probability for each example in the batch.

        Args:
            u_item_id: user/item id tensor consumed by the embedding layer.
            data_content: 7-tuple of content feature tensors in the order
                (audio, face, time, title, video, context_scaler,
                context_discrete).
            data_behavior: 3-tuple (behavior_discrete, behavior_duration,
                behavior_time) describing the user's history.

        Returns:
            Sigmoid probabilities; presumably shape (batch, 1) — one logit
            per example from ``like_lin1``.
        """
        audio_feature, face_feature, time_feature, \
        title_feature, video_feature, context_field_scaler, \
        context_field_discrete = data_content

        behavior_discrete_tensor, behavior_duration_tensor, behavior_time_tensor = data_behavior

        # Project every modality into the shared modality space.
        audio_feature = self.modality_map_lin["audio"](audio_feature)
        face_feature = self.modality_map_lin["face"](face_feature)
        video_feature = self.modality_map_lin["video"](video_feature)
        title_feature = self.modality_map_lin["title"](title_feature)

        uid_tensor, item_id_tensor, context_embedding_tensor, behavior_embedding_tensor = \
            self.emb(u_item_id, context_field_discrete, behavior_discrete_tensor)

        # Context features -> shared context space.
        time_feature = self.context_map_lin["time"](time_feature)
        context_field_scaler = self.context_map_lin["scaler"](context_field_scaler)
        context_embedding_tensor = self.context_map_lin["discrete"](context_embedding_tensor)

        # Behavior-history features share the same projection layers.
        behavior_time_tensor = self.context_map_lin["time"](behavior_time_tensor)
        behavior_duration_tensor = self.context_map_lin["scaler"](behavior_duration_tensor)
        behavior_embedding_tensor = self.context_map_lin["discrete"](behavior_embedding_tensor)

        # Self-attention over [global token | audio | face | video | title].
        b = audio_feature.shape[0]
        X = torch.stack([self.global_token.repeat((b, 1)), audio_feature, face_feature, video_feature, title_feature], dim=1)
        X_self = self.tranformer(X)
        global_token_0 = X_self[:, 0:1, :]  # fused global representation

        # Build 4 context tokens and prepend the fused global token.
        Y_pre = torch.cat([time_feature, context_field_scaler, context_embedding_tensor.view(b, -1)], dim=1)
        Y_pre = self.Y_pre_lin(Y_pre)
        Y = torch.cat([global_token_0, Y_pre.view(b, 4, -1)], dim=1)

        # Cross-attend the modality tokens to the context tokens, then fuse
        # cross-attended tokens, the item embedding, and the self-attended
        # modality tokens into one content vector.
        X_cross = self.cross_tranformer(X, Y)
        content = torch.cat([X_cross, item_id_tensor[:, None, :], X_self[:, 1:, :]], dim=1)
        content = self.out_tranformer(content).sum(1)

        # Interest branch: current context (ec) + behavior history (et).
        ec = torch.cat([time_feature[:, None, :], context_field_scaler[:, None, :], context_embedding_tensor], dim=1)
        et = torch.cat([behavior_time_tensor[:, :, None, :], behavior_duration_tensor[:, :, None, :], behavior_embedding_tensor], dim=2)
        behavior_tensor_ = torch.cat([ec[:, None, :, :], et], dim=1)
        ie = self.im(behavior_tensor_, uid_tensor)

        o_ie, h_ie = self.mgru(ie[:, 1:])
        interest = self.im_o(o_ie, ie[:, 0, :])
        # NOTE(review): `interest` is computed but deliberately excluded from
        # the head below (the interest pathway is disabled); it therefore
        # receives no gradient from this output. Re-enable by concatenating
        # it here and widening like_lin0 by im_out_dim:
        # out = torch.cat([uid_tensor, content, item_id_tensor, interest], dim=1)
        out = torch.cat([uid_tensor, content, item_id_tensor], dim=1)
        out = self.like_lin1(self.like_lin0(out))
        return torch.sigmoid(out)

    def loss(self, out, y):
        """Mean binary cross-entropy between predicted probabilities `out`
        (already sigmoid-ed in forward()) and binary targets `y`."""
        return self.bce_loss(out, y)

    def get_params(self):
        """Return all trainable parameters as a list.

        With the projection layers now registered in nn.ModuleDicts,
        ``self.parameters()`` already includes them, so the old manual
        collection from the plain dicts is no longer needed (and would
        have produced duplicates).
        """
        return list(self.parameters())
        
        
