# Position Aware Knowledge Base
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import RobertaModel, BertModel, RobertaConfig
from torch.nn.utils.rnn import pad_sequence, pad_packed_sequence, pack_padded_sequence
from models.ROPE import precompute_freqs_cis, apply_rotary_emb
import numpy as np, itertools, random, copy, math


class CKS_POS_Measure(nn.Module):
    """Position-aware attention that fuses commonsense knowledge (CSK) into
    utterance representations.

    The target (last) utterance of each conversation attends over all
    utterances twice — once through intra-speaker CSK and once through
    inter-speaker CSK. Rotary position embeddings (RoPE) are applied to the
    queries (at position ``conv_len - 1``) and keys (positions ``0..seq_len-1``)
    so the target's relative position is encoded in the attention scores.
    """

    def __init__(self, opt, in_features, hidden_features, attention_probs_dropout_prob=0.1):
        super().__init__()
        self.opt = opt
        self.hidden_dim = hidden_features
        self.input_dim = in_features
        self.num_head = 1  # single-head attention; RoPE dim = hidden_dim // num_head
        self.attention_dropout_prob = attention_probs_dropout_prob

        # Projections for intra-/inter-speaker commonsense features.
        self.W_intra = nn.Linear(self.input_dim, self.hidden_dim)
        self.W_inter = nn.Linear(self.input_dim, self.hidden_dim)
        # Standard attention projections (query / key / value).
        self.QW = nn.Linear(self.input_dim, self.hidden_dim)
        self.KW = nn.Linear(self.input_dim, self.hidden_dim)
        self.VW = nn.Linear(self.input_dim, self.hidden_dim)
        self.dropout = nn.Dropout(self.attention_dropout_prob)
        # Precomputed RoPE frequency table; 2 * max_utter_len positions so any
        # target index (conv_len - 1) is covered.
        self.freqs_cis = precompute_freqs_cis(self.hidden_dim // self.num_head, opt.max_utter_len * 2)

    def forward(self, target_vector, hidden_vectors, target_emotion, target_bf_cks, intra_csk, inter_csk, intra_mask, inter_mask, conv_len, type='emotion'):
        """Return CSK-enhanced features, shape matching ``hidden_vectors``.

        Args:
            target_vector: target-utterance features — assumes (batch, 1, dim),
                as built by the caller (PAKB) — TODO confirm for other callers.
            hidden_vectors: all utterance features, (batch, seq_len, dim).
            target_emotion: emotion features for the target, or an empty list
                when emotion is disabled (checked via ``len(...) > 0``).
            target_bf_cks: "event before" CSK features for the target.
            intra_csk / inter_csk: per-utterance intra-/inter-speaker CSK.
            intra_mask / inter_mask: attention masks (0 = masked out).
            conv_len: per-sample conversation lengths; target index is len-1.
            type: 'emotion' enables the emotion-augmented query path.
        """
        batch_size, seq_len, x_dim = hidden_vectors.size()
        # RoPE frequencies for each sample's target position (conv_len - 1).
        freqs_cis_q_index = []
        for i in range(batch_size):
            freqs_cis_q_index.append(conv_len[i]-1)
        freqs_cis_q = torch.index_select(self.freqs_cis, 0, torch.tensor(freqs_cis_q_index)).to(self.opt.device)
        if type == 'emotion' and len(target_emotion) > 0:
            # Emotion-aware query: target + emotion + "before"-event CSK.
            # NOTE(review): the inter query projects with self.KW, not self.QW —
            # confirm this asymmetry is intentional and not a copy-paste slip.
            query_layer_intra = apply_rotary_emb(self.QW(target_vector + target_emotion + target_bf_cks).unsqueeze(0), freqs_cis_q).squeeze()
            query_layer_inter = apply_rotary_emb(self.KW(target_vector + target_emotion + target_bf_cks).unsqueeze(0), freqs_cis_q).squeeze()
        else :
            query_layer_intra = apply_rotary_emb(self.QW(target_vector + target_bf_cks).unsqueeze(0), freqs_cis_q).squeeze()
            query_layer_inter = apply_rotary_emb(self.KW(target_vector + target_bf_cks).unsqueeze(0), freqs_cis_q).squeeze()

        # The .squeeze() above drops ALL size-1 dims (including batch when
        # batch_size == 1); restore the queries to rank 3: (batch, 1, dim).
        if len(query_layer_inter.shape) == 2:
            query_layer_inter = query_layer_inter.unsqueeze(1)
        elif len(query_layer_inter.shape) == 1:
            query_layer_inter = query_layer_inter.unsqueeze(0).unsqueeze(0)
        if len(query_layer_intra.shape) == 2:
            query_layer_intra = query_layer_intra.unsqueeze(1)
        elif len(query_layer_intra.shape) == 1:
            query_layer_intra = query_layer_intra.unsqueeze(0).unsqueeze(0)

        # Keys are rotated at utterance positions 0 .. seq_len-1.
        freqs_cis_k = self.freqs_cis[0 : hidden_vectors.shape[1]].to(self.opt.device)
        key_layer_intra = apply_rotary_emb((self.KW(hidden_vectors) + self.W_intra(intra_csk)).unsqueeze(2), freqs_cis_k).squeeze()
        key_layer_inter = apply_rotary_emb((self.KW(hidden_vectors) + self.W_inter(inter_csk)).unsqueeze(2), freqs_cis_k).squeeze()

        # Values carry the CSK-augmented utterance features (no RoPE).
        value_layer_intra = self.VW(hidden_vectors) + self.W_intra(intra_csk)
        value_layer_inter = self.VW(hidden_vectors) + self.W_inter(inter_csk)

        # Same rank-repair as for the queries: keys/values back to rank 3.
        if len(key_layer_inter.shape) == 2:
            key_layer_inter = key_layer_inter.unsqueeze(0)
        elif len(key_layer_inter.shape) == 1:
            key_layer_inter = key_layer_inter.unsqueeze(0).unsqueeze(0)
        if len(key_layer_intra.shape) == 2:
            key_layer_intra = key_layer_intra.unsqueeze(0)
        elif len(key_layer_intra.shape) == 1:
            key_layer_intra = key_layer_intra.unsqueeze(0).unsqueeze(0)
        if len(value_layer_inter.shape) == 2:
            value_layer_inter = value_layer_inter.unsqueeze(0)
        if len(value_layer_intra.shape) == 2:
            value_layer_intra = value_layer_intra.unsqueeze(0)

        # Scaled dot-product scores of the target query against every key,
        # zeroed outside the intra-speaker mask.
        attention_scores_intra = torch.matmul(query_layer_intra, key_layer_intra.transpose(-1, -2))
        attention_scores_intra = attention_scores_intra / math.sqrt(self.hidden_dim)
        attention_scores_intra = attention_scores_intra * intra_mask

        attention_scores_inter = torch.matmul(query_layer_inter, key_layer_inter.transpose(-1, -2))
        attention_scores_inter = attention_scores_inter / math.sqrt(self.hidden_dim)
        attention_scores_inter = attention_scores_inter * inter_mask

        # -1e9 at masked positions -> ~0 probability after softmax.
        attention_probs_intra = nn.Softmax(dim=-1)(attention_scores_intra.masked_fill(intra_mask==0, -1e9))
        attention_probs_inter = nn.Softmax(dim=-1)(attention_scores_inter.masked_fill(inter_mask==0, -1e9))

        # transpose(1, 2) turns the probs into (batch, seq, 1), so these are
        # broadcast element-wise weightings of each utterance's value — NOT a
        # summed attention context. NOTE(review): presumably intentional
        # (output keeps per-utterance resolution for the residual in PAKB);
        # verify against the paper/design.
        cks_enhanced = attention_probs_intra.transpose(1, 2)*value_layer_intra + attention_probs_inter.transpose(1, 2)*value_layer_inter
        target_enhanced = (attention_probs_intra).transpose(1, 2)*query_layer_intra.expand(batch_size, seq_len, x_dim) + (attention_probs_inter).transpose(1, 2)*query_layer_inter.expand(batch_size, seq_len, x_dim)
        final_features = cks_enhanced + target_enhanced
        # final_features = self.dropout(final_features)
        return final_features


class PAKB(nn.Module):
    """Position Aware Knowledge Base module.

    Projects four commonsense-knowledge (CSK) relations (effect, intent,
    react, want) for both the intra- and inter-speaker views, fuses them into
    the utterance representations via position-aware attention
    (``CKS_POS_Measure``), and applies a residual connection followed by
    ReLU + LayerNorm.
    """

    def __init__(self, opt, n_feat, n_hid, csk_features, dropout, n_heads, alpha=0.01):
        super().__init__()
        self.opt = opt
        self.input_dim = n_feat
        self.hidden_dim = n_hid
        self.csk_features = csk_features
        self.dropout = dropout
        self.n_heads = n_heads
        self.alpha = alpha
        self.cks_pos_measure = CKS_POS_Measure(opt, self.input_dim, self.hidden_dim, attention_probs_dropout_prob=0.4)
        # Each projection consumes the four CSK relations concatenated along
        # the feature axis, hence csk_features * 4 input features.
        self.cks_weight_intra = nn.Linear(self.csk_features * 4, self.input_dim)
        self.cks_weight_inter = nn.Linear(self.csk_features * 4, self.input_dim)
        self.layernorm = nn.LayerNorm(self.input_dim)

    def forward(self, x, conv_len, emo_vector,
                            event_csk_before, event_csk_after,
                            effect_csk_inter, effect_csk_intra,
                            intent_csk_inter, intent_csk_intra,
                            react_csk_inter, react_csk_intra,
                            want_csk_inter, want_csk_intra,
                            intra_mask, inter_mask):
        """Return CSK-enhanced utterance features, same shape as ``x``.

        Args:
            x: utterance features, (batch, seq_len, input_dim).
            conv_len: per-sample conversation lengths; the target utterance
                for sample ``i`` is at index ``conv_len[i] - 1``.
            emo_vector: per-utterance emotion features (used only when
                ``opt.add_emotion`` is set).
            event_csk_before / event_csk_after: event-centric CSK features
                (``event_csk_after`` is currently unused but kept for
                interface compatibility).
            *_csk_inter / *_csk_intra: the four CSK relations per view.
            intra_mask / inter_mask: attention masks (0 = masked out).
        """
        batch_size = x.shape[0]
        # BUG FIX: pass training=self.training — F.dropout defaults to
        # training=True, which would keep dropping activations in eval mode.
        x = F.dropout(x, self.dropout, training=self.training)

        # Project the concatenated CSK relations into the utterance space.
        cks_intra = self.cks_weight_intra(torch.cat([effect_csk_intra, intent_csk_intra, react_csk_intra, want_csk_intra], dim=-1))
        cks_inter = self.cks_weight_inter(torch.cat([effect_csk_inter, intent_csk_inter, react_csk_inter, want_csk_inter], dim=-1))

        # Gather, for every conversation, the last (target) utterance's
        # representation, masks, "before" event CSK and (optionally) emotion.
        target_emotion = []
        target_vector = []
        target_intra_mask = []
        target_inter_mask = []
        target_bf_cks = []

        for i in range(batch_size):
            last = conv_len[i] - 1  # index of the target utterance
            if self.opt.add_emotion:
                target_emotion.append(emo_vector[i][last].unsqueeze(0))
            target_vector.append(x[i][last].unsqueeze(0))
            target_intra_mask.append(intra_mask[i][last].unsqueeze(0))
            target_inter_mask.append(inter_mask[i][last].unsqueeze(0))
            target_bf_cks.append(event_csk_before[i][last].unsqueeze(0))

        # Stack per-sample slices into (batch, 1, ...) tensors.
        if self.opt.add_emotion:
            target_emotion = torch.cat(target_emotion, dim=0).unsqueeze(1).to(self.opt.device)
        target_vector = torch.cat(target_vector, dim=0).unsqueeze(1).to(self.opt.device)
        target_intra_mask = torch.cat(target_intra_mask, dim=0).unsqueeze(1).to(self.opt.device)
        target_inter_mask = torch.cat(target_inter_mask, dim=0).unsqueeze(1).to(self.opt.device)
        target_bf_cks = torch.cat(target_bf_cks, dim=0).unsqueeze(1).to(self.opt.device)

        # target_emotion stays an empty list when add_emotion is off; the
        # measure module checks len(...) > 0 before using it.
        cks_connection = self.cks_pos_measure(target_vector, x, target_emotion, target_bf_cks, cks_intra, cks_inter, target_intra_mask, target_inter_mask, conv_len, type='emotion')

        # Residual connection, then nonlinearity + normalization + dropout.
        out = cks_connection + x
        out = F.relu(out)
        out = self.layernorm(out)
        out = F.dropout(out, self.dropout, training=self.training)

        return out















