import torch
import torch.nn as nn
import torchvision.models as models
import math

def masked_softmax(X, valid_lens):
    """Softmax over the last axis, masking out positions beyond `valid_lens`.

    Args:
        X: 3D tensor of scores, shape (batch_size, num_queries, num_keys).
        valid_lens: 1D tensor (batch_size,) or 2D tensor
            (batch_size, num_queries) of valid key counts, or None.

    Returns:
        Tensor with the same shape as `X`; masked positions get ~0 weight.
    """
    if valid_lens is None:
        return nn.functional.softmax(X, dim=-1)
    shape = X.shape
    if valid_lens.dim() == 1:
        # One length per batch row: repeat it for every query.
        valid_lens = torch.repeat_interleave(valid_lens, shape[1])
    else:
        valid_lens = valid_lens.reshape(-1)
    # Flatten to 2D, then replace masked elements with a very large negative
    # value so their softmax (exponential) output is ~0.
    X = X.reshape(-1, shape[-1])
    positions = torch.arange(shape[-1], device=X.device)
    mask = positions[None, :] < valid_lens[:, None]
    X = X.masked_fill(~mask, -1e6)
    return nn.functional.softmax(X.reshape(shape), dim=-1)

class DotProductAttention(nn.Module):
    """Scaled dot-product attention."""

    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """Compute attention-weighted values.

        Shapes:
            queries: (batch_size, num_queries, d)
            keys: (batch_size, num_kv_pairs, d)
            values: (batch_size, num_kv_pairs, value_dim)
            valid_lens: (batch_size,) or (batch_size, num_queries), optional
        """
        scale = math.sqrt(queries.shape[-1])
        # Transpose the last two dims of `keys` to form (Q K^T) / sqrt(d).
        scores = torch.bmm(queries, keys.transpose(1, 2)) / scale
        # Cached on the module so callers can inspect/visualize the weights.
        self.attention_weights = masked_softmax(scores, valid_lens)
        weights = self.dropout(self.attention_weights)
        return torch.bmm(weights, values)

class AdditiveAttention(nn.Module):
    """Additive (Bahdanau-style) attention."""

    def __init__(self, query_size, key_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """Attend over `keys`/`values` with a 2D `queries` batch.

        Shapes (assumed from the call sites in this file — the decoder passes
        a 2D hidden state as the query; TODO confirm for other callers):
            queries: (batch_size, query_size) — unsqueezed to one query per row
            keys: (batch_size, num_kv_pairs, key_size)
            values: (batch_size, num_kv_pairs, value_dim)

        Returns:
            (batch_size, value_dim) — the singleton query dim is squeezed out.
        """
        queries, keys = self.W_q(queries.unsqueeze(1)), self.W_k(keys)
        # After broadcasting expansion:
        #   queries: (batch_size, num_queries, 1, num_hiddens)
        #   keys:    (batch_size, 1, num_kv_pairs, num_hiddens)
        features = torch.tanh(queries.unsqueeze(2) + keys.unsqueeze(1))
        # `w_v` has a single output, so drop the trailing dim:
        # scores: (batch_size, num_queries, num_kv_pairs)
        scores = self.w_v(features).squeeze(-1)
        self.attention_weights = masked_softmax(scores, valid_lens)
        # Removed leftover debug prints of the weight/value sizes.
        output = torch.bmm(self.dropout(self.attention_weights), values)
        return output.squeeze(1)

class Encoder(nn.Module):
    """ResNet-50 backbone that emits flattened spatial feature maps."""

    def __init__(self, embedding_size):
        super(Encoder, self).__init__()

        backbone = models.resnet50(pretrained=True)

        # Drop the average-pool and fully connected head: we want the
        # spatial feature map, not the original ImageNet classes.
        self.resnet = nn.Sequential(*list(backbone.children())[:-2])

        # 1x1 conv to shrink the 2048-channel map down to 256 channels.
        self.squeeze = nn.Conv2d(in_channels=2048, out_channels=256,
                                 kernel_size=1)

        # Projection to the embedding size; currently unused in forward()
        # (the call is commented out there) but kept for interface compat.
        self.linear = nn.Linear(in_features=backbone.fc.in_features,
                                out_features=embedding_size)
        self.BatchNorm = nn.BatchNorm1d(num_features=embedding_size,
                                        momentum=0.01)

    def forward(self, images):
        """Return features of shape (batch_size, 256, H*W).

        For the standard 224x224 input this is (batch, 256, 49) — the
        spatial grid is flattened into the last axis.
        """
        feat = self.squeeze(self.resnet(images))
        batch, channels = feat.size(0), feat.size(1)
        return feat.view(batch, channels, -1)

class MyLSTMCell(nn.Module):
    """LSTM cell whose input is a word embedding fused with image features,
    optionally via additive attention over the feature map."""

    def __init__(self, embedding_size, hidden_size, training, device):
        super(MyLSTMCell, self).__init__()
        self.lstm_cell = nn.LSTMCell(embedding_size, hidden_size, bias=True)
        # key_size/value_size of 49 match the encoder's flattened 7x7 grid.
        self.attention = Attention(embedding_size=hidden_size, key_size=49,
                                   value_size=49, hidden_size=hidden_size,
                                   dropout=0.1, training=training)

    def forward(self, state, current_embedding, image_feat=None,
                attention_flag=True):
        """One step: returns the next (h, c) pair."""
        prev_h, prev_c = state
        if attention_flag:
            # Query the image features with the previous hidden state.
            context, _ = self.attention(prev_h, image_feat, image_feat)
        else:
            context = image_feat.squeeze(0)
        # Fuse image context and the current token embedding.
        fused = torch.cat((context, current_embedding), dim=1)
        return self.lstm_cell(fused, (prev_h, prev_c))

class Attention(nn.Module):
    """Thin wrapper selecting the attention implementation.

    Currently delegates to AdditiveAttention. Removed a large block of
    commented-out multi-head dot-product code that duplicated what the
    (kept) transpose helpers support; `value_size`, `training`,
    `nums_heads` and `bias` are retained in the signature for
    backward compatibility with existing callers.
    """

    def __init__(self, embedding_size, key_size, value_size, hidden_size,
                 dropout, training, nums_heads=1, bias=False):
        super(Attention, self).__init__()
        self.attention = AdditiveAttention(embedding_size, key_size,
                                           hidden_size, dropout)

    def forward(self, query, key, value):
        """Return (attended_output, None).

        The second element is a placeholder kept so callers that unpack
        two values keep working.
        """
        output = self.attention(query, key, value)
        return output, None

    def transpose_qkv(self, X, num_heads):
        """Reshape (batch, seq, num_hiddens) for multi-head attention.

        Returns shape (batch * num_heads, seq, num_hiddens / num_heads).
        """
        X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
        # (batch, num_heads, seq, num_hiddens / num_heads)
        X = X.permute(0, 2, 1, 3)
        return X.reshape(-1, X.shape[2], X.shape[3])

    def transpose_output(self, X, num_heads):
        """Reverse the `transpose_qkv` reshaping."""
        X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
        X = X.permute(0, 2, 1, 3)
        return X.reshape(X.shape[0], X.shape[1], -1)

# https://github.com/njchoma/transformer_image_caption/blob/master/src/models/simple_model.py
# https://github.com/njchoma/transformer_image_caption/blob/master/src/models/simple_model.py
class Decoder(nn.Module):
    """LSTM caption decoder with attention over encoder image features."""

    def __init__(self, embed_size, hidden_size, vocab_size, vocab, device,
                 training=True):
        super(Decoder, self).__init__()
        self.device = device
        self.embed_size = embed_size

        self.embed = nn.Embedding(
            num_embeddings=vocab_size, embedding_dim=embed_size).to(device)
        start = torch.LongTensor(
            [vocab.get_id_by_token(vocab.get_start_token())]).to(device)
        self.start_embedding = self.embed(start).unsqueeze(1)
        end = torch.LongTensor(
            [vocab.get_id_by_token(vocab.get_end_token())]).to(device)
        self.end_embedding = self.embed(end).unsqueeze(1)

        # LSTM input = token embedding + 49-dim attended image context
        # (49 matches the encoder's flattened 7x7 feature grid).
        self.lstm = MyLSTMCell(embedding_size=embed_size + 49,
                               hidden_size=hidden_size, device=device,
                               training=training)
        self.linear = nn.Linear(in_features=hidden_size,
                                out_features=vocab_size)
        self.hidden_size = hidden_size

    def forward(self, features, captions, length, attention_flag=True):
        """Teacher-forced pass.

        Args:
            features: batch of image features (batch * feature dims).
            captions: list of per-sample token-id tensors.
            length: unused here; kept for caller compatibility.
            attention_flag: attend over image features when True.

        Returns:
            (total_tokens, vocab_size) logits over all non-final steps.
        """
        embeddings = [self.embed(caption) for caption in captions]
        features = features.unsqueeze(1)  # batch size * 1 * feature size
        for i in range(features.size(0)):
            # Prepend a zero "start" slot. Fixes: use self.embed_size
            # instead of the hard-coded 256, and self.device instead of
            # .cuda() so CPU runs also work.
            pad = torch.zeros((1, 1, self.embed_size), device=self.device)
            embeddings[i] = torch.cat((pad, embeddings[i]), 1).contiguous()
        # Because the feature map is fed at every step, each token stays
        # linked to the image features.
        states = []  # collect h's; single cat at the end avoids O(n^2) cats
        for j, embedding in enumerate(embeddings):
            h = torch.zeros((1, self.hidden_size), device=self.device)
            c = torch.zeros((1, self.hidden_size), device=self.device)
            for i in range(embedding.size(1)):
                h, c = self.lstm((h, c), embedding[:, i], features[j],
                                 attention_flag=attention_flag)
                # Skip the hidden state produced after the final token.
                if i != embedding.size(1) - 1:
                    states.append(h)
        hidden = torch.cat(states, 0)
        # (batch x tokens_length) * labels_count
        output = self.linear(hidden.squeeze(0).contiguous())
        return output

    def sample(self, features, start, end, longest_sentence_length=100,
               attention_flag=True):
        """Greedy decoding until `end` token or the length limit."""
        sampled_ids = []
        image_feat = features.unsqueeze(0)  # batch size * 256
        h = torch.zeros((1, self.hidden_size), device=self.device)
        c = torch.zeros((1, self.hidden_size), device=self.device)

        # Fix: honor self.device instead of hard-coded .cuda().
        start = torch.LongTensor([start]).to(self.device)
        embedding = self.embed(start)
        for _ in range(longest_sentence_length):
            # The original had identical i == 0 / else branches; collapsed.
            h, c = self.lstm((h, c), embedding, image_feat,
                             attention_flag=attention_flag)
            output = self.linear(h.squeeze(1))
            predicted = output.max(dim=1, keepdim=True)[1]
            if predicted == end:
                break
            sampled_ids.append(predicted)
            embedding = self.embed(predicted).squeeze(1)
        if not sampled_ids:
            # Previously torch.cat crashed when <end> was predicted first.
            return torch.empty(0, dtype=torch.long, device=self.device)
        sampled_ids = torch.cat(sampled_ids, 1)
        return sampled_ids.squeeze()


if __name__ == "__main__":
    # Smoke test for the decoder on a single hand-written caption.
    device = "cuda"
    # NOTE(review): Decoder.__init__ takes (embed_size, hidden_size,
    # vocab_size, vocab, device, ...) — this call passes only four
    # positional args, so `device` here binds to `vocab` and the required
    # `device` parameter is missing; as written this raises TypeError.
    # A vocab object (with get_start_token/get_end_token/get_id_by_token)
    # needs to be constructed and passed — confirm against the vocab module.
    decoder = Decoder(256, 512, 19, device)
    decoder.to(device)
    captions = []
    caption = torch.tensor([0,  4,  5,  6,  7,  8,  7,  8,  7,  8,  9, 10,  5, 11,  5, 12,  7, 13,
          7, 14,  9,  9, 10,  5, 15,  5, 12,  7, 13,  7, 14,  9, 15,  5, 12,  7,
         13,  7, 14,  9,  9, 10,  5, 15,  5, 12,  7, 13,  7, 16,  9, 15,  5, 12,
          7, 13,  7, 14,  9,  9,  1]).to(device)
    captions.append(caption)
    # NOTE(review): the attention path expects per-image features with a
    # 49-wide key axis (see Attention key_size=49, as produced by Encoder);
    # a flat (1, 256) tensor looks shape-incompatible with that path —
    # verify the intended feature shape before relying on this script.
    image_feat = torch.rand(1,  256).to(device)
    output = decoder(image_feat, captions, None)
    print(output.size())

