# -- coding: utf-8 --
from torch.nn import functional as F

from .common import *
# from .vit_model import VisionTransformer, PatchEmbed
from models.fusion_vit_only_ir50 import VisionTransformer
from .ir50 import Backbone


class BaseLine(nn.Module):
    """CNN + ViT baseline classifier.

    Pipeline: input images are resized to 112x112, passed through a
    pretrained IR-50 backbone, projected from 1024-d to 512-d, then fed
    through a VisionTransformer fusion block, an SE block, and a
    classification head producing ``num_classes`` logits.

    Args:
        img_size: nominal input image size (stored only; ``forward``
            resizes inputs to 112x112 regardless).
        num_classes: number of output classes.
        depth: transformer depth of the ViT branch.
        ir_50_path: filesystem path to the pretrained IR-50 checkpoint.
            (Bug fix: the previous default was the string
            ``"'./models/pretrain/ir50.pth'"`` — quote characters were
            part of the path, so loading the default always failed.)
    """

    def __init__(self, img_size=224, num_classes=7, depth=8,
                 ir_50_path='./models/pretrain/ir50.pth'):
        super().__init__()
        self.ir_50_path = ir_50_path
        self.img_size = img_size
        self.num_classes = num_classes
        self.depth = depth

        # CNN feature branch: pretrained IR-50 backbone.
        self.ir_back = Backbone(50, 0.0, 'ir')
        # map_location keeps checkpoint tensors on CPU; the module is
        # moved to the target device by the caller later.
        ir_checkpoint = torch.load(
            ir_50_path, map_location=lambda storage, loc: storage)
        self.ir_back = load_pretrained_weights(self.ir_back, ir_checkpoint)
        # Project backbone features (1024-d) down to the ViT embed dim.
        self.ir_layer1 = nn.Linear(1024, 512)

        # ViT fusion branch.
        self.ViT = VisionTransformer(in_chans=49, q_chanel=49, embed_dim=512,
                                     depth=depth, num_heads=8, mlp_ratio=2.,
                                     drop_rate=0., attn_drop_rate=0.,
                                     drop_path_rate=0.1)

        self.se_block = SE_block(input_dim=512)
        self.head = ClassificationHead(input_dim=512,
                                       target_dim=self.num_classes)

    def forward(self, x):
        """Run the full classification pipeline.

        Args:
            x: batch of images — assumed (B, C, H, W); resized to
               112x112 for the IR-50 backbone (TODO confirm expected
               channel count against the backbone).

        Returns:
            Output of ``ClassificationHead`` — presumably logits of
            shape (B, num_classes); verify against the head's
            implementation.
        """
        # IR-50 expects 112x112 inputs; F.interpolate defaults to
        # 'nearest' resampling here.
        x = F.interpolate(x, size=112)

        # CNN feature branch.
        x = self.ir_back(x)
        x = self.ir_layer1(x)

        # ViT fusion -> SE recalibration -> classification head.
        y_hat = self.ViT(x)
        y_hat = self.se_block(y_hat)
        out = self.head(y_hat)

        return out
