from torch import nn 
import torch 
import torch.nn.functional as F
import ml_collections
import math
import copy

# Default compute device for this module: GPU when available, else CPU.
DEVICE='cuda' if torch.cuda.is_available() else 'cpu'

class PositionalEncoder(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017).

    pe[pos, 2k]   = sin(pos / 10000^(2k / d_model))
    pe[pos, 2k+1] = cos(pos / 10000^(2k / d_model))

    Args:
        d_model: embedding dimension of the inputs.
        max_seq_len: longest sequence the table is precomputed for.
    """

    def __init__(self, d_model, max_seq_len=80):
        super().__init__()
        self.d_model = d_model
        # FIX: the original nested loop used exponents 2*i and 2*(i+1) with i
        # already stepping by 2, so each sin/cos pair had *different* (and
        # squared) frequencies vs. the paper, and it raised IndexError for odd
        # d_model. Build the table vectorized with the standard formula.
        position = torch.arange(max_seq_len, dtype=torch.float32).unsqueeze(1)  # (L, 1)
        # 10000^(-2k/d_model) for k = 0 .. ceil(d_model/2)-1
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float32)
            * (-math.log(10000.0) / d_model)
        )
        pe = torch.zeros(max_seq_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        # Odd d_model has one fewer cosine column than div_term entries.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        # Buffer, not a parameter: saved with the module, no gradients,
        # follows .to(device).
        self.register_buffer('pe', pe.unsqueeze(0))  # (1, L, d_model)

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        # (Optional sqrt(d_model) embedding scaling was disabled in the
        # original; kept disabled here.)
        # x = x * math.sqrt(self.d_model)
        seq_len = x.size(1)
        x = x + self.pe[:, :seq_len]
        return x

class Mlp(nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Expects a config exposing `hidden_size`, `mlp_dim`, and `mlp_dropout_rate`.
    Input and output shapes are both (..., hidden_size).
    """

    def __init__(self, config):
        super(Mlp, self).__init__()
        self.fc1 = nn.Linear(config.hidden_size, config.mlp_dim)
        self.fc2 = nn.Linear(config.mlp_dim, config.hidden_size)
        self.act_fn = torch.nn.functional.gelu
        self.dropout = nn.Dropout(config.mlp_dropout_rate)

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights, then near-zero Gaussian biases.
        # (Kept in weight-then-bias order to preserve the RNG stream.)
        for weight in (self.fc1.weight, self.fc2.weight):
            nn.init.xavier_uniform_(weight)
        for bias in (self.fc1.bias, self.fc2.bias):
            nn.init.normal_(bias, std=1e-6)

    def forward(self, x):
        """Apply the two-layer MLP with dropout after each projection."""
        hidden = self.dropout(self.act_fn(self.fc1(x)))
        return self.dropout(self.fc2(hidden))


class Attention(nn.Module): #这是一个多头注意力，有self_attn，cross_attn两种模式
    def __init__(self, config):
        super(Attention, self).__init__()
        self.num_attention_heads = config.num_heads
        self.all_head_size = config.all_head_size
        self.attention_head_size = int(self.all_head_size / self.num_attention_heads) #每个头的大小

        self.wq = nn.Linear(config.hidden_size, self.all_head_size)  # # text_hidden_size和img_hidden_size必须相同
        self.wk = nn.Linear(config.hidden_size, self.all_head_size)  
        self.vk = nn.Linear(config.hidden_size, self.all_head_size)  

        self.out = nn.Linear(self.all_head_size, config.hidden_size)
        self.attn_dropout = nn.Dropout(config.attention_dropout_rate)
        self.proj_dropout = nn.Dropout(config.attention_dropout_rate)

        self.softmax = nn.Softmax(dim=-1)  # 计算注意力权重，通过对 Query 和 Key 的点积结果进行归一化

    def transpose_for_scores(self, x): # x的shape必须是(b, N, all_head_size)
        # eg: 768 -> 12*64 将数据转换为适合多头注意力操作的形状，以便能够并行处理多个注意力头
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)  # 将最后一维(all_head_size)改为(num_attention_heads, attention_head_size)
        return x.permute(0, 2, 1, 3)# x.shape (b, num_attention_heads,N,attention_head_size)

    '''
        mode: "self_attn" or "cross_attn
        self_attn_mask, cross_attn_mask是一个二维数组，大小是(bs, 1)，表示每一个batch中有效seq_len大小
        N_text是text的seq_len, N_img是img的seq_len(比如vit把图片划分为16*16的patch和1个cls，因此vit的seq_len为197), 做self_attn的时候用N_text,做corss_attn的时候用N_img
    '''
    def generate_mask(self, self_attn_mask, cross_attn_mask, bs, N_text, N_img, mode = "self_attn"):
        if self_attn_mask == None and cross_attn_mask == None:
            mask = None
        else:
            if mode == "self_attn":
                mask = torch.ones(bs, N_text, device=DEVICE).view(bs,1,1,N_text)
                for i in range(bs):
                    valid_size = self_attn_mask[i,0]
                    mask[i,0,0,0:valid_size] = 0
            else:
                mask = torch.ones(bs, N_img, device=DEVICE).view(bs,1,1,N_img)
                for i in range(bs):
                    valid_size = cross_attn_mask[i,0]
                    mask[i,0,0,0:valid_size] = 0
            mask = mask * (-1e6)
        return mask

    '''
        mode: "self_attn" or "cross_attn
        self_attn_mask, cross_attn_mask是一个二维数组，大小是(bs, 1)，表示每一个batch中有效token的数量
        text_embed.shape = (bs, N_text, hidden_size)
    '''
    def forward(self, text_embed, img_embed = None, mode = "self_attn", self_attn_mask = None, cross_attn_mask = None):
        if mode == "cross_attn" and img_embed is None:
            print("if mode == cross_attn, img_embed must not None")
            assert False
        bs = text_embed.shape[0]
        q = self.wq(text_embed) #(bs, N_text, all_head_size)
        k = self.wk(text_embed) if mode == "self_attn" else self.wk(img_embed)#(bs, N_text, all_head_size) or (bs, N_img, all_head_size)
        v = self.vk(text_embed) if mode == "self_attn" else self.vk(img_embed)#(bs, N_text, all_head_size) or (bs, N_img, all_head_size)
        N_text = text_embed.shape[1]
        N_img = img_embed.shape[1] if img_embed is not None else None
        mask = self.generate_mask(self_attn_mask, cross_attn_mask, bs, N_text, N_img, mode = mode)#(bs,1,1,N_text) or (bs,1,1,N_img)

        multi_q = self.transpose_for_scores(q) #改为多头  (b, num_attention_heads,N,attention_head_size)
        multi_k = self.transpose_for_scores(k) 
        multi_v = self.transpose_for_scores(v)

        attention_scores = torch.matmul(multi_q, multi_k.transpose(-1, -2))  # 注意力分数 = query 点积 key
        if mask is not None:
            attention_scores += mask
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)  # 序列越长分数越大，对其进行比例缩放
        attention_probs = self.softmax(attention_scores)  # 得到实际权重值
        attention_probs = self.attn_dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, multi_v)  # 把权重分配给value 每个位置信息从这一步开始考虑全局特征
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()  # 将多头注意力还原回原来维度，(b, N,num_attention_heads,attention_head_size)
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        attention_output = context_layer.view(*new_context_layer_shape)  #(b, N, all_head_size)
        attention_output = self.out(attention_output)  # 再经过一个全连接层, (b, N, hidden_size)
        attention_output = self.proj_dropout(attention_output)
        return attention_output

class Block(nn.Module):
    """Pre-norm Transformer block: self-attention, optional cross-attention,
    and a feed-forward sublayer, each with a residual connection."""

    def __init__(self, config):
        super(Block, self).__init__()
        self.hidden_size = config.hidden_size
        self.attention_norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn = Mlp(config)
        self.attn = Attention(config)

    def forward(self, text_embed, img_embed=None, mode="self_attn", self_attn_mask=None, cross_attn_mask=None):
        """mode: "self_attn" or "cross_attn". Shapes: text_embed is
        (b, N, hidden_size); returns the same shape."""
        # Self-attention sublayer with residual connection.
        h = text_embed  # (b, N, hidden_size)
        x = self.attention_norm(text_embed)
        # FIX: attend over the normalized `x` — the original passed the raw
        # `text_embed`, silently discarding the LayerNorm output.
        x = self.attn(x, self_attn_mask=self_attn_mask, cross_attn_mask=cross_attn_mask)
        x = x + h

        if mode == "cross_attn":
            # NOTE(review): the same attention_norm/attn modules are reused for
            # the cross-attention sublayer; dedicated modules are more common,
            # but this is kept to preserve the existing parameter layout.
            h = x
            x = self.attention_norm(x)
            x = self.attn(x, img_embed=img_embed, mode="cross_attn", self_attn_mask=self_attn_mask, cross_attn_mask=cross_attn_mask)
            x = x + h

        # Feed-forward sublayer with residual connection.
        h = x
        x = self.ffn(self.ffn_norm(x))
        return x + h
    

class Transformer_Encoder(nn.Module):
    """Stack of Transformer Blocks over embedded, positionally-encoded text.

    Expects a config exposing `vocab_size`, `hidden_size`, and
    `num_block_layer` (plus whatever Block/Mlp/Attention require).
    """

    def __init__(self, config):
        super(Transformer_Encoder, self).__init__()
        self.text_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.pos_embeddings = PositionalEncoder(config.hidden_size)
        # FIX: construct each Block directly — the original deep-copied every
        # freshly built Block before appending, which was redundant work.
        self.layer = nn.ModuleList(Block(config) for _ in range(config.num_block_layer))
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-6)

    def forward(self, text_x, img_embed=None, mode="itc", self_attn_mask=None, cross_attn_mask=None):
        """Encode token ids.

        mode: "itc" runs text-only self-attention; any other value (e.g. "itm")
        adds cross-attention to `img_embed` in every block.
        text_x: (bs, N) token ids. Returns (bs, N, hidden_size).
        """
        text_embed = self.text_embedding(text_x)  # (bs, N, hidden_size)
        text_embed = self.pos_embeddings(text_embed)
        for layer_block in self.layer:
            if mode == "itc":
                text_embed = layer_block(text_embed, self_attn_mask=self_attn_mask, cross_attn_mask=cross_attn_mask)
            else:
                text_embed = layer_block(text_embed, img_embed, mode="cross_attn", self_attn_mask=self_attn_mask, cross_attn_mask=cross_attn_mask)
        return self.layer_norm(text_embed)  # (bs, N, hidden_size)


if __name__=='__main__':
    # Smoke-test input: one batch of 10 token ids.
    x = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]])
    # Example usage (requires a config provider):
    # img_embed = torch.randn(1,1,256)
    # config = get_text_config()
    # word_embedding = nn.Embedding(10, 256)
    # t = Transformer_Encoder(config)
    # word_vec = word_embedding(x)
    # out = t(word_vec, img_embed, mode = "itm")
    # print(word_vec.shape)