import torch
import torch.nn as nn
from transformer_posmb import posemb_sincos_1d, posemb_sincos_2d
from transformer_encoder_decoder import BaseTransformerLayer

class CBA(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Padding is fixed at ``kernel_size // 2``, which preserves spatial size for
    odd kernels at stride 1 ("same"-style padding).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super().__init__()
        self.cov = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=kernel_size // 2,
            stride=stride,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply convolution, batch normalization, then ReLU."""
        return self.relu(self.bn(self.cov(x)))

class PatchLayer(nn.Module):
    """Convolutional patch embedding.

    A small conv stem maps an image [B, 3, H, W] to a sequence of patch tokens
    [B, N, embeding_dim]. The stem's strides are 2*2*2*2*1, so the total
    spatial downsampling is a fixed factor of 16 and N = (H // 16) * (W // 16).
    """

    # Product of the conv strides in `proj`; the actual downsampling factor.
    _TOTAL_STRIDE = 16

    def __init__(self, img_size, patch_size=20, embeding_dim=64):
        """
        Args:
            img_size: (height, width) of the input images.
            patch_size: kept for backward compatibility with existing callers;
                the conv stem's downsampling is fixed at 16 and does not depend
                on this value.
            embeding_dim: output token dimension.
        """
        super(PatchLayer, self).__init__()
        # BUG FIX: grid_size was previously computed from `patch_size`, but the
        # conv stem below always downsamples by _TOTAL_STRIDE (16) regardless of
        # `patch_size`. Whenever patch_size != 16 (the class default is 20!),
        # num_patches disagreed with the real token count, which breaks any
        # positional embedding sized from num_patches (see Vit.pos_embed).
        self.grid_size = (img_size[0] // self._TOTAL_STRIDE,
                          img_size[1] // self._TOTAL_STRIDE)
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.proj = nn.Sequential(
            CBA(in_channels=3, out_channels=64, kernel_size=5, stride=2),
            CBA(in_channels=64, out_channels=64, kernel_size=3, stride=2),
            CBA(in_channels=64, out_channels=128, kernel_size=3, stride=2),
            CBA(in_channels=128, out_channels=embeding_dim, kernel_size=3, stride=2),
            CBA(in_channels=embeding_dim, out_channels=embeding_dim, kernel_size=3, stride=1),
        )
        self.norm = nn.LayerNorm(normalized_shape=embeding_dim)

    def forward(self, img):
        """[B, 3, H, W] -> [B, (H // 16) * (W // 16), embeding_dim]."""
        img = self.proj(img)  # conv stem: [B, C, H/16, W/16]
        img = img.flatten(start_dim=2)  # flatten spatial dims: [B, C, H*W/256]
        img = img.permute(0, 2, 1)  # [B, N, C]
        img = self.norm(img)
        return img


class Vit(nn.Module):
    """Vision Transformer classifier.

    Pipeline: conv patch embedding (PatchLayer) -> learnable position embedding
    -> `depth` transformer encoder layers -> LayerNorm -> linear head.

    Nonstandard detail: the class token is appended AFTER the patch tokens in
    `hand`, so `forward` reads it back at index -1 (not the usual index 0).
    """

    def __init__(self, img_size=(224, 224), patch_size=16, num_classes=1000,
                 embed_dim=768, depth=12, num_heads=12):
        """
        Args:
            img_size: (height, width) of the input images.
                FIX: the default was a mutable list ([224, 224]); a tuple avoids
                the shared-mutable-default pitfall and is backward compatible
                (the value is only ever indexed).
            patch_size: nominal patch size forwarded to PatchLayer.
            num_classes: number of output classes for the linear head.
            embed_dim: token embedding dimension.
            depth: number of transformer encoder layers.
            num_heads: attention heads per layer.
        """
        super(Vit, self).__init__()

        self.patch_embed = PatchLayer(img_size, patch_size, embed_dim)
        num_patches = self.patch_embed.num_patches
        # `depth` identical pre-norm encoder layers. BaseTransformerLayer is a
        # project module; NOTE(review): the permutes in extract_feature suggest
        # it expects [seq, batch, dim] input -- confirm against its implementation.
        self.blocks = nn.Sequential(*[
            BaseTransformerLayer(attn_cfgs=[dict(embed_dim=embed_dim, num_heads=num_heads, att_dropout=0.1)],
                                 fnn_cfg=dict(embed_dim=embed_dim, feedforward_channels=4 * embed_dim, act_cfg='ReLU',
                                              ffn_drop=0.1),
                                 operation_order=('norm', 'self_attn', 'norm', 'ffn'))
            for _ in range(depth)
        ])

        # Learnable class token, appended to the patch sequence in `hand`.
        self.cls_token = nn.Parameter(torch.zeros(size=[1, 1, embed_dim]))
        # Learnable position embedding; a fixed sin-cos alternative is kept below:
        # self.pos_embed = posemb_sincos_1d(len=num_patches + 1, dim=embed_dim,temperature=1000).unsqueeze(0)
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.norm = nn.LayerNorm(normalized_shape=embed_dim)

        # Classification head.
        self.head = nn.Linear(embed_dim, num_classes)

        self.loss_class = nn.CrossEntropyLoss()  # applies log-softmax internally
        self.init_weights()

    def init_weights(self):
        """Initialize weights: truncated normal for embeddings and Linear layers,
        Kaiming normal for convs, ones/zeros for LayerNorm."""
        nn.init.trunc_normal_(self.pos_embed, std=0.02)
        nn.init.trunc_normal_(self.cls_token, std=0.02)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.trunc_normal_(m.weight, std=.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.LayerNorm):
                nn.init.zeros_(m.bias)
                nn.init.ones_(m.weight)

    def hand(self, input):
        """Patch-embed the image and append the class token.

        [B, 3, H, W] -> [B, N, C] -> [B, N + 1, C] (class token appended LAST).
        """
        input = self.patch_embed(input)
        # Broadcast the single class token over the batch.
        cls_token = self.cls_token.expand(len(input), -1, -1)
        output = torch.cat((input, cls_token), dim=1)  # [B, N, C] cat [B, 1, C]
        return output

    def extract_feature(self, input):
        """Add the position embedding and run the encoder stack.

        [B, N + 1, C] in, [B, N + 1, C] out.
        """
        input = input + self.pos_embed.to(input)
        # The encoder layers appear to consume [seq, batch, dim]; permute in/out.
        input = input.permute(1, 0, 2)
        input = self.blocks(input)  # self-attention stack
        input = self.norm(input)
        input = input.permute(1, 0, 2)
        return input

    def forward(self, img):
        """Return class logits of shape [B, num_classes]."""
        query = self.hand(img)
        query = self.extract_feature(query)
        cls_fea = query[:, -1, :]  # class token was appended last along dim 1
        x = self.head(cls_fea)
        return x

    def loss(self, data, label):
        """Training helper: forward pass + cross-entropy loss."""
        output = self(data)  # [B, num_classes]
        loss = self.loss_class(output, label)
        return loss

    def predict(self, data, label):
        """Validation helper: return (detached loss, count of correct predictions)."""
        output = self(data).detach()  # [B, num_classes]
        loss = self.loss_class(output, label).detach()
        pred = torch.argmax(output, dim=1)
        correct_num = torch.eq(pred, label).sum()
        return loss, correct_num
