# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: point2point
# > Author: 04000387
# > Created Time: 2025/1/7 9:44
# *******************************************************************
from torch import nn
import torch
from torchvision.models import vgg16_bn


class Block(nn.Module):
    """Residual convolutional block.

    Runs the input through a small conv stack (in_channel -> 64 -> 64 ->
    in_channel, so the channel count is preserved), adds the input back
    via a skip connection, and applies a final ReLU.
    """

    def __init__(self, in_channel):
        super().__init__()

        # Bottleneck-style stack; last conv projects back to in_channel
        # so the skip connection shapes line up.
        layers = [
            nn.Conv2d(in_channel, 64, kernel_size=3, padding=1),
            nn.InstanceNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.InstanceNorm2d(64),
            nn.Conv2d(64, out_channels=in_channel, padding=1, kernel_size=3),
        ]
        self.cnn = nn.Sequential(*layers)
        self.active = nn.ReLU()

    def forward(self, x):
        # Skip connection keeps the block's output the same shape as x.
        out = self.cnn(x)
        out = out + x
        return self.active(out)


class Point2PointModel(nn.Module):
    """CNN backbone mapping an image to a sequence of feature vectors.

    Shape: (B, in_channel, H, W) -> (B, (H/8) * (W/8), out_channel),
    since the three stride-2 convolutions each halve H and W.
    """

    def __init__(self, in_channel=3, out_channel=512):
        super().__init__()
        # 1x1 conv lifts the input to a fixed 64-channel width.
        self.align = nn.Conv2d(in_channel, 64, kernel_size=1, stride=1)

        # Residual blocks interleaved with stride-2 downsampling convs.
        self.blocks = nn.Sequential(
            Block(64),
            nn.Conv2d(64, 256, kernel_size=3, padding=1, stride=2),
            Block(256),
            nn.Conv2d(256, 512, 3, 2, padding=1),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, out_channel, kernel_size=3, stride=2, padding=1)
        )

        self.flatten = nn.Flatten(2, -1)

    def forward(self, x):
        aligned = self.align(x)
        features = self.blocks(aligned)
        # Collapse the spatial dims, then move channels last:
        # (B, C, H', W') -> (B, C, H'*W') -> (B, H'*W', C).
        tokens = self.flatten(features)
        return tokens.permute(0, 2, 1)


class EncoderModule(nn.Module):
    """VGG16-BN feature extractor followed by a Transformer encoder.

    Pipeline: image -> VGG16-BN convolutional features -> align conv to
    ``out_feature`` channels -> flatten spatial dims into a token
    sequence -> prepend a learnable class token -> add learned
    positional embeddings -> Transformer encoder.

    Args:
        out_feature: channel width of the aligned features and the
            Transformer model dimension. Must be divisible by the
            8 attention heads used below.
        num_layer: number of Transformer encoder layers.
        num_tokens: sequence length covered by the positional embedding,
            including the class token. The default 50 preserves the
            previous hard-coded value (49 spatial tokens + 1 class
            token — presumably a 7x7 VGG feature map from a 224x224
            input; TODO confirm against callers).
    """

    def __init__(self, out_feature=512, num_layer=1, num_tokens=50):
        super().__init__()
        # NOTE(review): vgg16_bn() is built without pretrained weights
        # (random init) — confirm this is intended.
        self.feature = vgg16_bn().features
        # 3x3 conv aligning VGG's 512 channels to the model dimension.
        self.align = nn.Conv2d(512, out_feature, 3, 1, 1)
        layer = nn.TransformerEncoderLayer(out_feature, 8, out_feature, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layer)

        # Learnable [CLS]-style token prepended to every sequence.
        self.class_token = nn.Parameter(torch.zeros(1, 1, out_feature))

        # Learned positional embedding. Previously hard-coded to 50
        # tokens; now parameterized (default keeps old behavior).
        self.pos_embedding = nn.Parameter(torch.empty(1, num_tokens, out_feature).normal_(std=0.02))

        self.flatten = nn.Flatten(2, -1)

    def forward(self, x):
        x = self.feature(x)
        x = self.align(x)
        # (B, C, H, W) -> (B, H*W, C) token sequence.
        x = self.flatten(x).permute(0, 2, 1)
        batch_class_token = self.class_token.expand((x.size(0), -1, -1))
        # Broadcasting requires H*W + 1 == num_tokens.
        x = torch.cat((batch_class_token, x), dim=1) + self.pos_embedding
        x = self.encoder(x)
        return x
