import torch
import torch.nn as nn

class OPT:
    """Minimal hyper-parameter container for the `__main__` smoke test.

    Defines every attribute that `Classifier` reads; the original version
    only set `hidden_dim`, so constructing/running `Classifier` crashed on
    the missing `mlp_dropout`, `device`, and `add_emotion` attributes.
    """

    def __init__(self):
        self.hidden_dim = 100     # per-utterance feature dimension
        self.mlp_dropout = 0.1    # dropout rate passed to CausePredictor
        self.device = 'cpu'       # device for tensors created in Classifier.forward
        self.add_emotion = False  # whether forward adds the target emotion vector

class CausePredictor(nn.Module):
    def __init__(self, input_dim, mlp_dim, mlp_dropout=0.1):
        super(CausePredictor, self).__init__()
        self.input_dim = input_dim
        self.mlp_dim = mlp_dim
        self.mlp_dropout = mlp_dropout
        self.mlp = nn.Sequential(nn.Linear(input_dim * 3, mlp_dim, False), nn.ReLU(), nn.LayerNorm(mlp_dim), nn.Dropout(mlp_dropout),
                                 nn.Linear(mlp_dim, mlp_dim, False), nn.ReLU(), nn.LayerNorm(mlp_dim), nn.Dropout(mlp_dropout))
        # self.mlp = nn.Sequential(nn.Linear(input_dim * 3, mlp_dim, False), nn.ReLU(), nn.Dropout(mlp_dropout))
        self.predictor_weight = nn.Linear(mlp_dim, 1, False)

    def forward(self, x, conv_len, mask):
        predict_score = self.predictor_weight(self.mlp(x)).squeeze(-1)
        predict_score = torch.sigmoid(predict_score) * mask

        return predict_score

class Classifier(nn.Module):
    """Combines fused utterance features, suffix-summed LSTM context, and the
    target (last) utterance embedding, then scores each utterance as a
    potential cause via `CausePredictor`."""

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # Fuses the two per-utterance feature streams back down to hidden_dim.
        self.integration = nn.Linear(opt.hidden_dim * 2, opt.hidden_dim)
        # Projects the suffix-summed LSTM states ("intermediate info").
        self.weight = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.mlp_dim = 300
        self.predictor = CausePredictor(opt.hidden_dim, self.mlp_dim, self.opt.mlp_dropout)
        self.layernorm = nn.LayerNorm(opt.hidden_dim)
        self.inteInfo_dropout = nn.Dropout(p=0.3)

    def forward(self, utter_emb, utter_lstm, pakb_features, pag_features, conv_len, umask, emo_vector=None):
        '''
        :param utter_emb: [batch_size, max_len, dim]
        :param utter_lstm: [batch_size, max_len, dim]
        :param pakb_features: [batch_size, max_len, dim]
        :param pag_features: [batch_size, max_len, dim]
        :param conv_len: per-conversation true lengths (indexable, length batch_size)
        :param umask: [batch_size, max_len] padding mask passed to the predictor
        :param emo_vector: [batch_size, max_len, dim]; required only when
            opt.add_emotion is True (now optional, since callers that do not
            use emotion never supplied it)
        :return: [batch_size, max_len] masked cause scores
        '''
        batch_size, max_len, dim = utter_emb.shape
        # Create scratch tensors on the inputs' device rather than opt.device,
        # so the module works wherever the caller placed the data.
        device = utter_emb.device
        utter_features = self.integration(torch.cat((pakb_features, pag_features), dim=-1))

        # Suffix sums of the LSTM states: info[j] = sum_{k=j..L-1} utter_lstm[i, k].
        # A reversed cumsum replaces the original O(L^2) nested accumulation loop.
        intermedia_info_batch = []
        for i in range(batch_size):
            info = torch.zeros(max_len, self.opt.hidden_dim, device=device)
            length = int(conv_len[i])
            if length > 0:
                suffix = torch.flip(
                    torch.cumsum(torch.flip(utter_lstm[i, :length], dims=[0]), dim=0),
                    dims=[0],
                )
                info[:length] = self.inteInfo_dropout(self.weight(suffix))
            intermedia_info_batch.append(info)
        intermedia_info_batch = torch.stack(intermedia_info_batch, dim=0)

        # Target (= last real) utterance of each conversation, broadcast over max_len.
        target_utter = torch.stack(
            [utter_emb[i, int(conv_len[i]) - 1] for i in range(batch_size)], dim=0
        ).unsqueeze(1).expand(batch_size, max_len, dim)

        if self.opt.add_emotion:
            # BUG FIX: the original added `target_emotion` unconditionally, so with
            # add_emotion=False it tried to add an empty Python list to a tensor.
            if emo_vector is None:
                raise ValueError("emo_vector is required when opt.add_emotion is True")
            target_emotion = torch.stack(
                [emo_vector[i, int(conv_len[i]) - 1] for i in range(batch_size)], dim=0
            ).unsqueeze(1).expand(batch_size, max_len, dim)
            target_utter = target_utter + target_emotion

        final_feature = torch.cat((utter_features, intermedia_info_batch, target_utter), dim=-1)
        logits = self.predictor(final_feature, conv_len, umask)
        return logits


if __name__ == '__main__':
    # Smoke test: 2 conversations, max_len 2, hidden dim 100.
    opt = OPT()
    # Set every attribute Classifier reads explicitly, so this demo runs even
    # if OPT does not define them; add_emotion=True exercises the emotion path.
    opt.mlp_dropout = 0.1
    opt.device = 'cpu'
    opt.add_emotion = True

    utter_emb = torch.rand(2, 2, 100)
    utter_lstm = torch.rand(2, 2, 100)
    pakb_features = torch.rand(2, 2, 100)
    pag_features = torch.rand(2, 2, 100)
    emo_vector = torch.rand(2, 2, 100)
    conv_len = torch.tensor([1, 2])
    umask = torch.tensor([[1, 0], [1, 1]])

    cls = Classifier(opt)
    # BUG FIX: the original call omitted emo_vector, so forward() raised a
    # TypeError before any computation happened.
    out = cls(utter_emb, utter_lstm, pakb_features, pag_features, conv_len, umask, emo_vector)
    print(out.shape)