from turtle import forward
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_sequence
import numpy as np, itertools, random, copy, math
from models.encoder import UtterEncoder
from transformers import RobertaConfig
from models.PAKB import PAKB
from models.PaG import PaG
from models.Classifier import Classifier
from models.DynamicLSTM import DynamicLSTM


class Model(nn.Module):
    """Conversation model: RoBERTa-based utterance encoder followed by
    PAKB (commonsense-knowledge fusion), PaG, a 2-layer LSTM over
    utterance features, and a final classifier.

    Args:
        opt: hyper-parameter / feature-switch namespace (``add_emotion``,
            ``use_role``, ``use_pos``, ``use_*_csk``, dims, dropouts,
            ``device``, ``window_size``, ``model_size``).
        emotion_num: size of the emotion-label vocabulary (index 0 is padding).
    """

    def __init__(self, opt, emotion_num):
        super().__init__()
        self.opt = opt
        # NOTE(review): hard-coded absolute Windows path — consider moving
        # this into ``opt`` so the model is portable across machines.
        config = RobertaConfig.from_pretrained('D:/code/python/myProject/models/roberta-base')
        config.num_hidden_layers = 10
        config.num_attention_heads = 8
        config.hidden_size = 768
        if opt.add_emotion:
            self.emotion_embeddings = nn.Embedding(emotion_num, opt.emotion_dim, padding_idx=0)
        # BUG FIX: the original condition listed ``use_intent_csk`` twice and
        # omitted ``use_effect_csk`` — with only effect-CSK enabled,
        # forward() would hit a missing ``self.csk_lin`` (AttributeError).
        if (opt.use_intent_csk or opt.use_react_csk or opt.use_event_csk
                or opt.use_want_csk or opt.use_effect_csk):
            self.csk_lin = nn.Linear(opt.csk_dim, opt.hidden_dim)
        if opt.use_role:
            # Three role ids: speaker A (0), speaker B (1), padding (2).
            self.role_embedding = nn.Embedding(3, opt.hidden_dim, padding_idx=2)
        self.utter_encoder = UtterEncoder(config, opt.model_size, opt.hidden_dim, rnn_dropout=opt.rnn_dropout)
        self.pakb = PAKB(opt, opt.hidden_dim, opt.hidden_dim, opt.hidden_dim, dropout=0.4, n_heads=1)
        self.pag = PaG(opt, opt.window_size, opt.hidden_dim, 2)
        self.rnn = DynamicLSTM(opt.hidden_dim, opt.hidden_dim, num_layers=2, dropout=self.opt.lstm_dropout)
        self.classifier = Classifier(opt)

        if opt.use_pos:
            # NOTE(review): created but not used in forward() (the position
            # branch there was commented out upstream) — kept so checkpoints
            # that contain these weights still load.
            self.position_embeddings = nn.Embedding(32, opt.hidden_dim, padding_idx=31)
            self.position_embeddings.weight.data.uniform_(-0.1, 0.1)

    def _project_csk(self, csk):
        """Project one commonsense-knowledge tensor into the hidden space.

        Shared linear layer + ReLU, then swap the first two dimensions
        (presumably batch-first -> sequence-first for PAKB — TODO confirm
        against PAKB's expected layout).
        """
        return F.relu(self.csk_lin(csk)).transpose(0, 1)

    def _role_labels(self, speaker_mask):
        """Convert per-utterance one-hot speaker masks to integer role ids.

        [1, 0] -> 0 (speaker A), [0, 1] -> 1 (speaker B); any other row
        (e.g. padding) -> 2, which matches the embedding's padding_idx.
        """
        role_label = []
        for conv in speaker_mask:
            roles = []
            for mask in conv:
                pair = mask.cpu().tolist()
                if pair == [1.0, 0.0]:
                    roles.append(0)
                elif pair == [0.0, 1.0]:
                    roles.append(1)
                else:
                    roles.append(2)
            role_label.append(torch.tensor(roles))
        return torch.stack(role_label, dim=0).to(self.opt.device)

    def forward(self, input_ids, attention_mask, emotion_label, relative_position, intra_mask, inter_mask, bf, af,  xW, xR, xE, xI, oW, oR, oE, oI, speaker_mask, umask):
        """Run the full pipeline and return the classifier output.

        Args:
            input_ids / attention_mask: tokenized utterances for the encoder.
            emotion_label: per-utterance emotion ids (used iff ``add_emotion``).
            relative_position: currently unused (position branch disabled).
            intra_mask / inter_mask: speaker-wise attention masks for PAKB.
            bf / af: before/after event CSK features.
            xW, xR, xE, xI / oW, oR, oE, oI: want/react/effect/intent CSK
                features for the same speaker (x*) and the other speaker (o*).
            speaker_mask: one-hot speaker indicator per utterance.
            umask: utterance validity mask; nonzero entries count as real turns.
        """
        # Number of real utterances per conversation; kept on CPU for the
        # packed-sequence utilities downstream.
        text_len = torch.sum(umask != 0, dim=-1).cpu()
        # Presumed (batch_size, max_turns, hidden_dim) — TODO confirm in UtterEncoder.
        utter_emb = self.utter_encoder(input_ids, attention_mask, text_len)

        if self.opt.use_role:
            utter_emb = utter_emb + self.role_embedding(self._role_labels(speaker_mask))

        if self.opt.add_emotion:
            emo_emb = self.emotion_embeddings(emotion_label)
            utter_features1 = utter_emb + emo_emb
        else:
            emo_emb = None
            # BUG FIX: ``utter_features1`` was undefined on this path in the
            # original, crashing with NameError when add_emotion was off.
            utter_features1 = utter_emb

        utter_features2 = utter_emb

        if self.opt.use_effect_csk:
            effect_csk_intra = self._project_csk(xE)
            effect_csk_inter = self._project_csk(oE)
        else:
            effect_csk_intra, effect_csk_inter = None, None

        if self.opt.use_intent_csk:
            intent_csk_intra = self._project_csk(xI)
            intent_csk_inter = self._project_csk(oI)
        else:
            intent_csk_intra, intent_csk_inter = None, None

        if self.opt.use_react_csk:
            react_csk_intra = self._project_csk(xR)
            react_csk_inter = self._project_csk(oR)
        else:
            react_csk_intra, react_csk_inter = None, None

        if self.opt.use_want_csk:
            want_csk_intra = self._project_csk(xW)
            want_csk_inter = self._project_csk(oW)
        else:
            want_csk_intra, want_csk_inter = None, None

        if self.opt.use_event_csk:
            event_csk_before = self._project_csk(bf)
            event_csk_after = self._project_csk(af)
        else:
            event_csk_before, event_csk_after = None, None

        pakb_features = self.pakb(utter_features1, text_len, emo_emb,
                                  event_csk_before, event_csk_after,
                                  effect_csk_inter, effect_csk_intra,
                                  intent_csk_inter, intent_csk_intra,
                                  react_csk_inter, react_csk_intra,
                                  want_csk_inter, want_csk_intra,
                                  intra_mask, inter_mask)
        pag_features = self.pag(utter_features2, text_len)
        utter_lstm = self.rnn(utter_features1, text_len)
        out = self.classifier(utter_features1, utter_lstm[0], pakb_features, pag_features, text_len, umask, emo_emb)

        return out

