import torch
import torch.nn as nn
import copy
import torch.nn.functional as F
import math
from torch.autograd import Variable


def _get_activation_fn(activation):
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu

    raise RuntimeError("activation should be relu/gelu, not {}".format(activation))

class PositionalEncoding(nn.Module):
    """Add the fixed sinusoidal position encodings of "Attention Is All You
    Need" to the input, then apply dropout.

    The table is precomputed once for ``max_len`` positions and registered
    as a (1, max_len, d_model) buffer named ``pe``.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Frequencies computed in log space for numerical stability:
        # freq_i = 10000^(-2i / d_model)
        positions = torch.arange(0, max_len).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2) *
                          -(math.log(10000.0) / d_model))
        angles = positions * freqs
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)  # even dims get sine
        table[:, 1::2] = torch.cos(angles)  # odd dims get cosine
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the first seq_len encodings.
        return self.dropout(x + self.pe[:, :x.size(1)])

class MultiheadAttention(nn.Module):
    """Standard multi-head scaled dot-product attention.

    Projects q/k/v with separate linear layers, splits into ``num_heads``
    heads, attends, merges heads, and applies an output projection.
    """

    def __init__(self, model_dim, num_heads, drop_out=0.1):
        super(MultiheadAttention, self).__init__()
        assert model_dim % num_heads == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(model_dim, model_dim)
        self.query = nn.Linear(model_dim, model_dim)
        self.value = nn.Linear(model_dim, model_dim)
        # regularization
        self.attn_drop = nn.Dropout(drop_out)
        self.resid_drop = nn.Dropout(drop_out)
        # output projection
        self.proj = nn.Linear(model_dim, model_dim)
        self.n_head = num_heads

    def forward(self, q, k, v, mask=None):
        """q: [B, T, C]; k, v: [B, S, C]; mask: optional [B, T, S] boolean
        mask where True marks blocked positions.  Returns [B, T, C]."""
        B, T, C = q.size()

        # project, then split heads: (B, nh, len, hs)
        k = self.key(k).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2)   # (B, nh, S, hs)
        q = self.query(q).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(v).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, S, hs)

        # scaled dot-product: (B, nh, T, hs) x (B, nh, hs, S) -> (B, nh, T, S)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        # Bug fix: masked_fill previously ran unconditionally, which raised a
        # TypeError whenever mask was None (masked_fill needs a tensor mask).
        if mask is not None:
            mask = mask.unsqueeze(1)  # (B, 1, T, S), broadcast over heads
            att = att.masked_fill(mask == True, -1e10)
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v  # (B, nh, T, S) x (B, nh, S, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # merge heads

        # output projection
        y = self.proj(y)
        return y

class Synthesizer(nn.Module):
    """Dense Synthesizer attention: attention weights are predicted from the
    query projection by a small two-layer MLP instead of query-key dot
    products.

    The MLP output dimension fixes the maximum attendable source length at
    512 (``max_seq_len``); the prediction is truncated to the actual key
    length before the softmax.
    """

    def __init__(self, model_dim, num_heads, drop_out=0.1):
        super(Synthesizer, self).__init__()
        d_hid = model_dim // 4
        d_k = model_dim // num_heads  # per-head feature size
        max_seq_len = 512
        self.n_head = num_heads
        # per-head MLP that maps head features -> attention logits over S
        self.w_1 = nn.Linear(d_k, d_hid)
        self.w_2 = nn.Linear(d_hid, max_seq_len)
        self.relu = nn.ReLU()
        self.attn_drop = nn.Dropout(drop_out)
        self.key = nn.Linear(model_dim, model_dim)
        self.query = nn.Linear(model_dim, model_dim)
        self.value = nn.Linear(model_dim, model_dim)
        self.proj = nn.Linear(model_dim, model_dim)

    def forward(self, q, k, v, mask=None):
        """q: [B, T, C]; k, v: [B, S, C] with S <= 512; mask: optional
        [B, T, S] boolean mask, True = blocked.  Returns [B, T, C]."""
        B, T, C = q.size()

        # project, then split heads: (B, nh, len, hs)
        k = self.key(k).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2)   # (B, nh, S, hs)
        q = self.query(q).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(v).view(B, -1, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, S, hs)

        # dense attention: (B, nh, T, hs) -> (B, nh, T, 512), truncated to S
        att = self.w_2(self.relu(self.w_1(q)))[:, :, :, :k.size(-2)]

        # Bug fix: masked_fill previously ran unconditionally, which raised a
        # TypeError whenever mask was None (masked_fill needs a tensor mask).
        if mask is not None:
            mask = mask.unsqueeze(1)  # (B, 1, T, S), broadcast over heads
            att = att.masked_fill(mask == True, -1e10)
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v  # (B, nh, T, S) x (B, nh, S, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # merge heads

        # output projection
        y = self.proj(y)
        return y

class PositionalWiseFeedForward(nn.Module):
    """Position-wise feed-forward block implemented with two 1x1 Conv1d
    layers (equivalent to per-position linear layers), followed by dropout."""

    def __init__(self, model_dim=512, ffn_dim=2048, dropout=0.1, activation='relu'):
        super(PositionalWiseFeedForward, self).__init__()
        self.w1 = nn.Conv1d(model_dim, ffn_dim, 1)
        self.w2 = nn.Conv1d(ffn_dim, model_dim, 1)
        self.dropout = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, x):
        # Conv1d expects (B, C, L), so move features to the channel dim.
        hidden = self.activation(self.w1(x.transpose(1, 2)))
        projected = self.w2(hidden).transpose(1, 2)
        return self.dropout(projected)

class MLP(nn.Module):
    """Two-layer perceptron with a ReLU in between.  With ``gate=True`` the
    output is softmax-normalised over the last dim (used for MoE gating)."""

    def __init__(self, input_size, output_size, hidden_size, gate=False):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.gate = gate

    def forward(self, x):
        scores = self.fc2(self.relu(self.fc1(x)))
        return F.softmax(scores, dim=-1) if self.gate else scores

class MoeTransformer(nn.Module):
    """Seq2seq transformer whose dialogue-history encoder both conditions the
    decoder (via cross-attention) and drives a mixture-of-experts gate.

    The gate (an MLP over the mean-pooled history encoding) scores every
    expert; only the top ``choose_expert_num`` experts keep non-zero,
    softmax-renormalised weight.  The gates select expert adapters inside
    the encoder layers and expert decoder stacks in ``MultiDecoder``.
    """

    def __init__(self, vocab_size, max_len=30, expert_num=10, choose_expert_num=4, his_num=15, adapter_size=64, model_dim=768, num_heads=12, ffn_dim=2048, n_layers=10,  drop_out=0.1, activation='gelu'):
        super(MoeTransformer, self).__init__()

        encoder_layer = EncoderLayer(model_dim, num_heads, expert_num, choose_expert_num, adapter_size, ffn_dim, drop_out, activation)
        decoder_layer = DecoderLayer(model_dim, num_heads, ffn_dim, drop_out, activation)
        # expert_num == 1 turns EncoderLayer into a plain transformer encoder layer
        his_layer = EncoderLayer(model_dim, num_heads, 1, 1, 1, ffn_dim, drop_out, activation)
        self.gate = MLP(model_dim, expert_num, model_dim, True)
        self.his_encoder = Encoder(his_layer, n_layers)
        self.encoder = Encoder(encoder_layer, n_layers)
        self.decoder = Decoder(decoder_layer, n_layers)
        self.multi_decoder = MultiDecoder(self.decoder, expert_num)
        self.src_embedder = nn.Embedding(vocab_size, model_dim)
        self.tgt_embedder = nn.Embedding(vocab_size, model_dim)
        self.seg_embedder = nn.Embedding(his_num, model_dim)
        self.pos_encoder = PositionalEncoding(model_dim, drop_out, max_len)
        self.fc_out = nn.Linear(model_dim, vocab_size)
        self.dec_layer_norm = nn.LayerNorm(model_dim)
        self.enc_layer_norm = nn.LayerNorm(model_dim)
        self.his_layer_norm = nn.LayerNorm(model_dim)
        self.max_len = max_len
        self.choose_expert_num = choose_expert_num
        self.expert_num = expert_num

        self.apply(self._init_weights)
        print("number of parameters: {}".format(sum(p.numel() for p in self.parameters())))

    def _init_weights(self, module):
        # BERT-style init: N(0, 0.02) for Linear/Embedding weights,
        # zero bias, and ones/zeros for LayerNorm.
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def padding_mask(self, seq_k, seq_q, pad_token=1):
        """Boolean mask [B, L_q, L_k]: True where the key token is padding."""
        len_q = seq_q.size(1)
        pad_mask = seq_k.eq(pad_token)
        pad_mask = pad_mask.unsqueeze(1).expand(-1, len_q, -1)  # shape [B, L_q, L_k]
        return pad_mask

    def sequence_mask(self, seq_len):
        """Causal mask [1, L, L]: 1 strictly above the diagonal (blocked)."""
        mask = torch.triu(torch.ones((seq_len, seq_len)), diagonal=1)
        mask = mask.unsqueeze(0)  # [1, L, L]
        return mask

    def forward(self, src, his, his_seg, tgt=None):
        """Training (tgt given): returns log-probs [B, T, vocab].
        Inference (tgt is None): greedy-decodes up to max_len steps and
        returns a list of per-step log-prob tensors [B, vocab]."""
        # ---- history encoding: token + position + segment embeddings ----
        his_inputs = self.tgt_embedder(his)
        his_inputs = self.pos_encoder(his_inputs)
        his_inputs = his_inputs + self.seg_embedder(his_seg)
        his_mask = self.padding_mask(his, his)
        his_outputs = self.his_layer_norm(self.his_encoder(his_inputs, his_mask))
        # ---- gating: mean-pool history, keep top-k experts, renormalise ----
        his_encodes = torch.mean(his_outputs, dim=1)
        logits = self.gate(his_encodes)
        top_logits, top_indices = logits.topk(min(self.choose_expert_num, self.expert_num), dim=1)
        top_k_logits = top_logits[:, :self.choose_expert_num]
        top_k_indices = top_indices[:, :self.choose_expert_num]
        top_k_gates = F.softmax(top_k_logits, dim=-1)
        zeros = torch.zeros_like(logits, requires_grad=True)
        gates = zeros.scatter(1, top_k_indices, top_k_gates)  # [B, expert_num], rows sum to 1
        # ---- source encoding (expert adapters selected by gates) ----
        src_inputs = self.src_embedder(src)
        src_inputs = self.pos_encoder(src_inputs)
        src_mask = self.padding_mask(src, src)
        encoder_outputs = self.enc_layer_norm(self.encoder(src_inputs, src_mask, gates))

        if tgt is not None:
            # ---- teacher-forced decoding through the expert decoders ----
            tgt_inputs = self.tgt_embedder(tgt)
            tgt_inputs = self.pos_encoder(tgt_inputs)
            mem_mask = self.padding_mask(src, tgt)
            history_mask = self.padding_mask(his, tgt)
            tgt_mask = self.padding_mask(tgt, tgt)
            # combine padding and causal masks (True = blocked)
            tgt_mask = torch.gt(tgt_mask + self.sequence_mask(tgt.size(-1)).type_as(tgt_mask), 0)
            tgt_outputs = self.dec_layer_norm(self.multi_decoder(tgt_inputs, his_outputs, encoder_outputs, history_mask, mem_mask, tgt_mask, gates))
            predictions = self.fc_out(tgt_outputs)
            return F.log_softmax(predictions, dim=-1)
        else:
            # ---- greedy autoregressive decoding ----
            sos_token = 2
            decoder_outputs = []
            device = src.device
            batch_size = src.size(0)
            output = torch.full((batch_size, 1), sos_token, dtype=torch.long, device=device, requires_grad=False)
            for i in range(self.max_len):
                embedded_output = self.tgt_embedder(output)
                embedded_output = self.pos_encoder(embedded_output)
                mem_mask = self.padding_mask(src, output)
                # Bug fix: was padding_mask(his_outputs, tgt) — `tgt` is None
                # in this branch (crashes on tgt.size(1)) and his_outputs
                # holds float hidden states, not token ids.
                history_mask = self.padding_mask(his, output)
                tgt_mask = self.padding_mask(output, output)
                tgt_mask = torch.gt(tgt_mask + self.sequence_mask(output.size(-1)).type_as(tgt_mask), 0)
                # NOTE(review): inference uses self.decoder while training uses
                # self.multi_decoder with the expert gates — confirm this
                # asymmetry is intentional.
                decoder_output = self.dec_layer_norm(self.decoder(embedded_output, his_outputs, encoder_outputs, history_mask, mem_mask, tgt_mask))
                prediction = self.fc_out(decoder_output[:, -1, :])
                prediction = F.log_softmax(prediction, dim=-1)
                decoder_outputs.append(prediction)
                # append the argmax token and feed it back in
                output = torch.cat((output, prediction.topk(1)[1].long().view(batch_size, -1)), dim=1)
        return decoder_outputs

class EncoderLayer(nn.Module):
    """Pre-norm encoder layer: Synthesizer self-attention + position-wise FFN,
    optionally followed by a per-sample mixture-of-experts adapter
    (active only when expert_num > 1)."""
    def __init__(self, model_dim=512, num_heads=8, expert_num=1, choose_expert_num=4, adapter_size=64, ffn_dim=2048, drop_out=0.1, activation='relu'):
        super(EncoderLayer, self).__init__()
        self.expert_num = expert_num
        # self.attn = MultiheadAttention(model_dim, num_heads, drop_out)
        self.attn = Synthesizer(model_dim, num_heads, drop_out)
        self.fc = PositionalWiseFeedForward(model_dim, ffn_dim, drop_out, activation)
        self.atten_layer_norm = nn.LayerNorm(model_dim)
        self.con_layer_norm = nn.LayerNorm(model_dim)
        self.adapter_norm = nn.LayerNorm(model_dim)
        self.dropout = nn.Dropout(drop_out)
        self.choose_expert_num = choose_expert_num
        if expert_num > 1:
            # structurally identical adapter MLPs, trained independently per expert
            expert = MLP(model_dim, model_dim, adapter_size)
            self.experts = nn.ModuleList([copy.deepcopy(expert) for _ in range(expert_num)])

    def forward(self, inputs, mask=None, attention_expert=None):
        """inputs: [B, L, model_dim]; mask: attention mask (True = blocked);
        attention_expert: [B, expert_num] gate weights, read only when
        expert_num > 1."""
        batch_size, src_len, model_dim = inputs.shape

        # pre-norm self-attention with residual; dropout applied to the sum
        x = inputs
        x_norm = self.atten_layer_norm(x)
        y = self.attn(x_norm, x_norm, x_norm, mask)

        x = self.dropout(x + y)
        # pre-norm feed-forward with residual
        x_norm = self.con_layer_norm(x)
        y = self.fc(x_norm)

        x = self.dropout(x + y)
        if self.expert_num > 1:
            # mixture-of-experts adapter: per-sample gate-weighted sum of experts
            x_norm = self.adapter_norm(x)
            expert_output = []
            for b in range(batch_size):
                x_b_list = []
                for idx in range(self.expert_num):
                    # skip experts with near-zero gate weight to save compute
                    # NOTE(review): if no gate exceeds 0.001 for a sample,
                    # torch.stack on an empty list raises — verify gates are
                    # always softmax-normalised upstream.
                    if attention_expert[b, idx] > 0.001:
                        x_b_idx = attention_expert[b, idx] * self.experts[idx](x_norm[b,:,:])
                        x_b_list.append(x_b_idx)
                x_b = torch.stack(x_b_list, dim=0)
                x_b = x_b.sum(dim=0)
                expert_output.append(x_b)
            y = torch.stack(expert_output, dim=0)
            output = self.dropout(x + y)
            return output 
        else:
            return x

class Encoder(nn.Module):
    """Stack of ``n_layers`` deep copies of one encoder layer, applied in
    sequence; every layer receives the same mask and expert gates."""

    def __init__(self, encoder_layer, n_layers):
        super(Encoder, self).__init__()
        self.layers = nn.ModuleList([copy.deepcopy(encoder_layer) for _ in range(n_layers)])

    def forward(self, x, mask=None, attention_expert=None):
        hidden = x
        for block in self.layers:
            hidden = block(hidden, mask, attention_expert)
        return hidden

class DecoderLayer(nn.Module):
    """Pre-norm decoder layer with three attention sublayers: masked
    Synthesizer self-attention, cross-attention over the history states,
    cross-attention over the encoder memory, then a position-wise FFN.
    Each sublayer uses its own LayerNorm, a residual add, and shared
    dropout."""

    def __init__(self, model_dim=512, num_heads=8, ffn_dim=2048, drop_out=0.1, activation='relu'):
        super(DecoderLayer, self).__init__()
        self.tgt_attn = Synthesizer(model_dim, num_heads, drop_out)
        self.mem_attn = MultiheadAttention(model_dim, num_heads, drop_out)
        self.persona_attn = MultiheadAttention(model_dim, num_heads, drop_out)
        self.fc = PositionalWiseFeedForward(model_dim, ffn_dim, drop_out, activation)
        self.atten_layer_norm = nn.LayerNorm(model_dim)
        self.mem_layer_norm = nn.LayerNorm(model_dim)
        self.persona_layer_norm = nn.LayerNorm(model_dim)
        self.con_layer_norm = nn.LayerNorm(model_dim)
        self.dropout = nn.Dropout(drop_out)

    def forward(self, inputs, history, memory, history_mask, memory_mask=None, tgt_mask=None):
        state = inputs

        # masked self-attention over the target sequence
        normed = self.atten_layer_norm(state)
        state = self.dropout(state + self.tgt_attn(normed, normed, normed, tgt_mask))

        # cross-attention over the dialogue-history states
        normed = self.persona_layer_norm(state)
        state = self.dropout(state + self.persona_attn(normed, history, history, history_mask))

        # cross-attention over the source-encoder memory
        normed = self.mem_layer_norm(state)
        state = self.dropout(state + self.mem_attn(normed, memory, memory, memory_mask))

        # position-wise feed-forward
        normed = self.con_layer_norm(state)
        return self.dropout(state + self.fc(normed))

class Decoder(nn.Module):
    """Stack of ``n_layers`` deep copies of one decoder layer; each layer
    receives the same history/memory tensors and masks."""

    def __init__(self, decoder_layer, n_layers):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList([copy.deepcopy(decoder_layer) for _ in range(n_layers)])

    def forward(self, x, history, memory, history_mask=None, memory_mask=None, tgt_mask=None):
        hidden = x
        for block in self.layers:
            hidden = block(hidden, history, memory, history_mask, memory_mask, tgt_mask)
        return hidden

class MultiDecoder(nn.Module):
    """Mixture-of-experts decoder: ``expert_num`` independent deep copies of
    a full Decoder stack, combined per sample as a gate-weighted sum."""
    def __init__(self, decoder, expert_num):
        super(MultiDecoder, self).__init__()
        self.expert_num = expert_num
        self.decoders = nn.ModuleList([copy.deepcopy(decoder) for _ in range(expert_num)])
    
    def forward(self, x, history, memory, history_mask=None, memory_mask=None, tgt_mask=None, attention_expert=None):
        """x: [B, T, model_dim]; attention_expert: [B, expert_num] gate
        weights.  Each sample is run (with batch dim 1) only through the
        experts whose gate weight exceeds 0.001, and the outputs are summed
        with those weights."""
        expert_output = []
        batch_size, tgt_len, model_dim = x.shape
        for b in range(batch_size):
            x_b_list = []
            for idx in range(self.expert_num):
                # skip experts with near-zero gate weight to save compute
                # NOTE(review): if no gate exceeds 0.001 for a sample,
                # torch.stack on an empty list raises — verify gates are
                # softmax-normalised upstream.
                if attention_expert[b, idx] > 0.001:
                    x_b_idx = attention_expert[b, idx] * self.decoders[idx](x[b,:,:].unsqueeze(0), history[b,:,:].unsqueeze(0), memory[b,:,:].unsqueeze(0), history_mask[b,:,:].unsqueeze(0), memory_mask[b,:,:].unsqueeze(0), tgt_mask[b,:,:].unsqueeze(0)).squeeze(0)
                    x_b_list.append(x_b_idx)
            x_b = torch.stack(x_b_list, dim=0)
            x_b = x_b.sum(dim=0)
            expert_output.append(x_b)
        y = torch.stack(expert_output, dim=0)
        return y

