from __future__ import absolute_import

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from .self_conv import Self_Conv_Encoder_layer


class ConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation.

    Args:
        in_channel / out_channel: channel counts for the convolution.
        kernel_size, stride, padding: passed straight to nn.Conv2d.
        act: 'relu' applies an in-place ReLU; None (or any other value)
            applies no activation.
    """

    def __init__(self, in_channel, out_channel, kernel_size, stride, padding, act='relu'):
        super(ConvLayer, self).__init__()
        self.act = act
        self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding)
        self.bn = nn.BatchNorm2d(out_channel)
        # Bug fix: previously act_fn was only created for act == 'relu', yet
        # forward() ran it for ANY non-None act, crashing with AttributeError
        # for e.g. act='gelu'. Only ReLU is implemented; anything else is a no-op.
        self.act_fn = nn.ReLU(True) if act == 'relu' else None

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act_fn is not None:
            x = self.act_fn(x)
        return x


class PreCNN(nn.Module):
    """Shallow CNN front-end that turns an image into a column sequence.

    Each image column becomes one sequence step: the (height, channel)
    slice of the pooled feature map is flattened and linearly projected to
    ``out_channel``. The projection is sized for 64 * 8 inputs, so the
    input height must be 32 (two 2x2 max-pools reduce it to 8).
    """

    def __init__(self, in_channel=3, out_channel=512):
        super(PreCNN, self).__init__()
        self.conv1 = ConvLayer(in_channel, 32, (3, 3), (1, 1), padding=1)
        self.maxpool1 = nn.MaxPool2d((2, 2), (2, 2))
        self.conv2 = ConvLayer(32, 64, (3, 3), (1, 1), padding=1)
        self.maxpool2 = nn.MaxPool2d((2, 2), (2, 2))
        self.channel_interactive = nn.Linear(64 * 8, out_channel)

    def forward(self, x):
        # Conv/pool pipeline: (b, in_c, h, w) -> (b, 64, h/4, w/4).
        for stage in (self.conv1, self.maxpool1, self.conv2, self.maxpool2):
            x = stage(x)
        batch, channels, height, width = x.size()
        # (b, c, h, w) -> (b, w, h*c): one flattened column per step.
        columns = x.permute(0, 3, 2, 1).contiguous().view(batch, width, height * channels)
        return self.channel_interactive(columns)


class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal positional encodings to a (T, N, E) sequence.

    The table is precomputed for ``max_len`` positions and registered as a
    buffer of shape (max_len, 1, d_model) so it broadcasts over the batch.
    """

    def __init__(self, d_model, dropout=0.1, max_len=200):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        # Geometric frequency ladder over the even feature indices.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        angles = positions * freqs
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        self.register_buffer('pe', table.unsqueeze(1))

    def forward(self, x):
        """x: (T, N, E); returns dropout(x + pe[:T])."""
        return self.dropout(x + self.pe[:x.size(0), :, :])


class Trans_encoder_layer(nn.Module):
    """Pre-norm transformer encoder layer: self-attention then feed-forward.

    Input and output are (T, N, E) as required by nn.MultiheadAttention.
    ``act``: any non-None value selects ReLU (the only activation
    implemented); None disables the FFN activation.
    """

    def __init__(self, d_model=512, n_head=8, dim_ffn=1024, dropout_rate=0.2, act='relu'):
        super(Trans_encoder_layer, self).__init__()
        self.self_atten = nn.MultiheadAttention(d_model, n_head, dropout=dropout_rate)

        self.linear1 = nn.Linear(d_model, dim_ffn)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.linear2 = nn.Linear(dim_ffn, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        # NOTE: norm3 is never used in forward(); kept so existing
        # checkpoints (state_dict keys) still load.
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.dropout3 = nn.Dropout(dropout_rate)
        # Bug fix: with act=None, self.act was never assigned and forward()
        # crashed with AttributeError; None now means "no activation".
        self.act = F.relu if act is not None else (lambda t: t)

    def forward(self, encoder_features):
        """encoder_features: (T, N, E); returns (T, N, E)."""
        # Pre-norm residual self-attention.
        encoder_features = self.norm1(encoder_features)
        out, _ = self.self_atten(encoder_features, encoder_features, encoder_features)
        out = encoder_features + self.dropout1(out)

        # Pre-norm residual feed-forward block.
        out = self.norm2(out)
        out2 = self.linear2(self.dropout2(self.act(self.linear1(out))))
        out = out + self.dropout3(out2)

        return out


class Cnn_Trans_encoder(nn.Module):
    """CNN front-end followed by a stack of encoder layers.

    With self_conv=False the stack is Trans_encoder_layer (seq-first,
    (T, N, E)); with self_conv=True it is Self_Conv_Encoder_layer, which
    is fed batch-first (B, T, E). ``out_channel`` is accepted for backward
    compatibility but the CNN output width is driven by ``d_model`` so it
    matches the positional encoding and encoder layers.
    """

    def __init__(self, in_channel=1, out_channel=512, num_layers=6, d_model=512, nhead=8, dim_ffn=1024, dropout_rate=0.2, act='relu', self_conv=False):
        super(Cnn_Trans_encoder, self).__init__()
        # Bug fix: PreCNN was built with its own default (512) output size,
        # which broke any configuration with d_model != 512.
        self.precnn = PreCNN(in_channel, d_model)
        self.pos_encoding = PositionalEncoding(d_model, dropout_rate)
        self.layers = nn.ModuleList()
        self.norm = nn.LayerNorm(d_model)
        self.self_conv = self_conv
        for _ in range(num_layers):
            if self_conv:
                self.layers.append(Self_Conv_Encoder_layer())
            else:
                self.layers.append(Trans_encoder_layer(d_model, nhead, dim_ffn, dropout_rate, act))

    def forward(self, image):
        """image: (B, C, H, W) with H = 32 — TODO confirm; see PreCNN.

        Returns encoded features of shape (W/4, B, d_model).
        """
        encoder_features = self.precnn(image)  # b,l,c
        encoder_features = encoder_features.permute(1, 0, 2)  # l,b,c
        encoder_features = self.pos_encoding(encoder_features)
        # Self-conv layers consume batch-first tensors; transformer layers
        # consume seq-first, so convert around the layer stack when needed.
        if self.self_conv:
            encoder_features = encoder_features.permute(1, 0, 2).contiguous()  # l,b,c -> b,l,c
        for encoder_layer in self.layers:
            encoder_features = encoder_layer(encoder_features)
        if self.self_conv:
            encoder_features = encoder_features.permute(1, 0, 2).contiguous()  # b,l,c -> l,b,c
        out = self.norm(encoder_features)
        return out


class Trans_decoder_layer(nn.Module):
    """Post-norm transformer decoder layer: self-attn, cross-attn, FFN.

    All sequence tensors are (T, N, E) as required by nn.MultiheadAttention.
    ``act``: any non-None value selects ReLU (the only activation
    implemented); None disables the FFN activation.
    """

    def __init__(self, d_model=512, nhead=8, dim_ffn=1024, dropout_rate=0.2, act='relu'):
        super(Trans_decoder_layer, self).__init__()

        self.self_atten = nn.MultiheadAttention(d_model, nhead, dropout=dropout_rate)
        self.cross_atten = nn.MultiheadAttention(d_model, nhead, dropout=dropout_rate)

        self.linear1 = nn.Linear(d_model, dim_ffn)
        self.dropout = nn.Dropout(dropout_rate)
        self.linear2 = nn.Linear(dim_ffn, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout_rate)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.dropout3 = nn.Dropout(dropout_rate)
        # Bug fix: with act=None, self.act was never assigned and forward()
        # crashed with AttributeError; None now means "no activation".
        self.act = F.relu if act is not None else (lambda t: t)

    def forward(self, dec_input, encoder_features, atten_mask=None, memory_mask=None, tgt_key_mask=None, memory_key_mask=None):
        """Decode one layer.

        dec_input: (T, N, E) target sequence; encoder_features: (S, N, E)
        memory. atten_mask/tgt_key_mask mask the self-attention;
        memory_mask/memory_key_mask mask the cross-attention.
        Returns (T, N, E).
        """
        out, _ = self.self_atten(dec_input, dec_input, dec_input, attn_mask=atten_mask, key_padding_mask=tgt_key_mask)
        out = dec_input + self.dropout1(out)
        out = self.norm1(out)
        # Bug fix: memory_mask was accepted but silently ignored —
        # attn_mask was hard-coded to None on the cross-attention call.
        out2, atten_weights = self.cross_atten(out, encoder_features, encoder_features, attn_mask=memory_mask, key_padding_mask=memory_key_mask)
        out = out + self.dropout2(out2)
        out = self.norm2(out)
        out2 = self.linear2(self.dropout(self.act(self.linear1(out))))
        out = out + self.dropout3(out2)
        out = self.norm3(out)

        return out


class Trans_decoder(nn.Module):
    """Transformer decoder with token embeddings and a classification head.

    ``num_class`` is the vocabulary size INCLUDING two special tokens carved
    out of the top of the range: start token = num_class - 1, end token =
    num_class - 2.
    """

    def __init__(self, num_class, num_layers=3, d_model=512, nhead=8, dim_ffn=1024, dropout_rate=0.2, act='relu'):
        super(Trans_decoder, self).__init__()
        self.decoder_pe = PositionalEncoding(d_model, dropout=dropout_rate)
        self.decoder_emb = nn.Embedding(num_class, d_model)
        self.norm = nn.LayerNorm(d_model)
        self.de_layers = nn.ModuleList()
        for _ in range(num_layers):
            self.de_layers.append(Trans_decoder_layer(d_model, nhead, dim_ffn, dropout_rate, act))
        # Two-layer MLP head projecting decoder features to class logits.
        self.cls_op = nn.Sequential(nn.Linear(d_model, d_model * 2), nn.ReLU(True), nn.Linear(d_model * 2, num_class))
        self.STA_index = num_class - 1
        self.END_index = num_class - 2

    @staticmethod
    def generate_square_subsequent_mask(L):
        """Return an (L, L) float mask: 0 on/below the diagonal, -inf above.

        Added to attention scores, this blocks each position from attending
        to later positions (causal/autoregressive masking).
        """
        mask = (torch.triu(torch.ones(L, L)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def forward(self, encoder_features, dec_input, tgt_padding):
        """Teacher-forced decoding pass.

        encoder_features: (l_q, B, C) encoder memory.
        dec_input: (B, L) target token ids.
        tgt_padding: (B, L) key-padding mask for the targets.
        Returns logits of shape (B, L, num_class).
        """
        device = encoder_features.device
        dec_input = dec_input.permute(1, 0)  # L,B
        L_q, B = dec_input.size()
        # Shift targets right: prepend the start token and drop the last
        # position, so position t predicts the original token at t.
        dec_input = torch.cat([self.STA_index * torch.ones(1, B).type_as(dec_input), dec_input[:-1, :]], dim=0)
        atten_mask = Trans_decoder.generate_square_subsequent_mask(L_q).to(device)
        # Shift the padding mask by one to match the shifted inputs
        # (first column duplicated for the prepended start token).
        dec_input_padding_mask = torch.cat([tgt_padding[:, 0:1], tgt_padding[:, :-1]], dim=1)

        dec_input = self.decoder_emb(dec_input)
        dec_input = self.decoder_pe(dec_input)

        for decoder_layer in self.de_layers:
            dec_input = decoder_layer(dec_input, encoder_features, atten_mask=atten_mask, tgt_key_mask=dec_input_padding_mask)
        dec_input = self.norm(dec_input)
        logits = self.cls_op(dec_input)
        logits = logits.permute(1, 0, 2)  # B,L,C

        return logits

    def inference(self, encoder_features, max_len=100):
        """Greedy autoregressive decoding.

        encoder_features: (L, B, C) encoder memory.
        Each step re-runs the full decoder over [STA] + all predictions so
        far, then appends the newest argmax token.
        Returns (pred, logits, pred_score) with pred: (B, L),
        logits: (B, L, num_class).
        """
        device = encoder_features.device
        L, B, C = encoder_features.size()
        # Tracks which batch items have emitted END. NOTE(review): the
        # early-exit on stop_check is commented out below, so the loop
        # always runs max_len steps.
        stop_check = torch.zeros(B).to(device)
        dec_input = self.STA_index * torch.ones((1, B), dtype=int).long().to(device)
        atten_mask = Trans_decoder.generate_square_subsequent_mask(1).to(device)
        length = 0
        # while(torch.sum(stop_check)!=stop_check.size(0) and length < max_len):
        while (length < max_len):
            # dec_input is rebound to embeddings here and rebuilt from the
            # predictions at the end of each iteration.
            dec_input = self.decoder_emb(dec_input)
            dec_input = self.decoder_pe(dec_input)
            for decoder_layer in self.de_layers:
                dec_input = decoder_layer(dec_input, encoder_features, atten_mask=atten_mask)
            dec_input = self.norm(dec_input)
            logits = self.cls_op(dec_input)
            pred = logits.argmax(2)
            # pred,pred_score = logits.max(2)
            stop_check = torch.max(stop_check, pred[-1, :] == self.END_index)
            # Feed [STA] + all greedy predictions back in for the next step.
            dec_input = torch.cat([self.STA_index * torch.ones(1, B).long().to(device), pred], dim=0)
            atten_mask = Trans_decoder.generate_square_subsequent_mask(dec_input.size(0)).to(device)
            length += 1
        pred = pred.permute(1, 0)  # B,L
        logits = logits.permute(1, 0, 2)  # B,L,C
        pred_score = F.softmax(logits, dim=2)
        # NOTE(review): .max(2) returns (values, indices); the probabilities
        # are discarded and pred_score ends up holding argmax INDICES (a
        # duplicate of pred), not confidence scores — confirm intent.
        _, pred_score = pred_score.max(2)
        return pred, logits, pred_score


if __name__ == '__main__':
    # Smoke check. Bug fix: nn.MultiheadAttention requires embed_dim and
    # num_heads — the original zero-argument call raised a TypeError.
    n = nn.MultiheadAttention(512, 8)
