import torch
import torch.nn as nn

from models.swin_transformer import SwinTransformer, SwinPatchFormer

class L2Norm(nn.Module):
    """L2-normalize the input along dim 1, with an epsilon for stability.

    Used by the match heads to turn a flattened feature map into a
    unit-length descriptor vector.
    """

    def __init__(self):
        super(L2Norm, self).__init__()
        self.eps = 1e-10  # guards against division by zero on all-zero rows

    def forward(self, x):
        # keepdim=True lets the division broadcast for input of any rank;
        # the previous unsqueeze(-1).expand_as(x) only worked for 2-D input
        # (the only shape the heads in this file actually pass), so 2-D
        # behavior is unchanged.
        norm = torch.sqrt(torch.sum(x * x, dim=1, keepdim=True) + self.eps)
        return x / norm


class ConvMatchHead(nn.Module):
    """Conv head for descriptor matching: two stride-2 conv/BN/ReLU stages,
    dropout, a 1x1 projection with non-affine BN, then flatten and
    L2-normalize into a single descriptor per sample."""

    def __init__(self, in_ch=128):
        super(ConvMatchHead, self).__init__()

        # Convs are bias-free because each is followed by BatchNorm.
        layers = [
            nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(in_ch, affine=False),
            nn.ReLU(),
            nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(in_ch, affine=False),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Conv2d(in_ch, in_ch, kernel_size=1, bias=False),
            nn.BatchNorm2d(in_ch, affine=False),
        ]
        self.features = nn.Sequential(*layers)
        self.norm = L2Norm()

    def forward(self, input):
        feats = self.features(input)
        flat = feats.view(feats.size(0), -1)  # one descriptor row per sample
        return self.norm(flat)


class SwinMatchHead(nn.Module):
    """Match head backed by a SwinPatchFormer feature extractor; its output
    is flattened per sample and L2-normalized into a descriptor.

    NOTE(review): ``in_ch`` is accepted but never used — SwinPatchFormer()
    is built with its defaults. Confirm whether it should be forwarded.
    """

    def __init__(self, in_ch=9):
        super(SwinMatchHead, self).__init__()
        self.features = SwinPatchFormer()
        self.norm = L2Norm()

    def forward(self, input):
        feats = self.features(input)
        flat = feats.view(feats.size(0), -1)
        return self.norm(flat)


class CustomConvMatchHead(nn.Module):
    """Deeper conv head: stacked conv/BN/ReLU stages with progressive
    downsampling (stride 4, 4, 2, then an 8x8 stride-8 conv), ending in
    dropout and a non-affine BN; the result is flattened and L2-normalized
    into one descriptor per sample."""

    def __init__(self, in_ch=128):
        super(CustomConvMatchHead, self).__init__()

        def block(cin, cout, k, stride=1, pad=0):
            # conv -> non-affine BN -> ReLU; conv is bias-free since BN follows
            return [
                nn.Conv2d(cin, cout, kernel_size=k, stride=stride, padding=pad, bias=False),
                nn.BatchNorm2d(cout, affine=False),
                nn.ReLU(),
            ]

        # Layers are created in the same order as before, so randomly
        # initialized parameters are drawn identically.
        layers = []
        layers += block(in_ch, 64, 5, pad=2)

        layers += block(64, 64, 5, stride=4, pad=2)
        layers += block(64, 64, 5, pad=2)

        layers += block(64, 128, 5, stride=4, pad=2)
        layers += block(128, 128, 5, pad=2)

        layers += block(128, 256, 3, stride=2, pad=1)
        layers += block(256, 256, 3, pad=1)

        layers += [
            nn.Dropout(0.3),
            nn.Conv2d(256, 256, kernel_size=8, stride=8, bias=False),
            nn.BatchNorm2d(256, affine=False),
        ]
        self.features = nn.Sequential(*layers)
        self.norm = L2Norm()

    def forward(self, input):
        feats = self.features(input)
        flat = feats.view(feats.size(0), -1)
        return self.norm(flat)


# Smoke test for ConvMatchHead on random input.
# NOTE(review): the guard compares against "__main__1", which never matches,
# so this block is deliberately disabled; rename to "__main__" to run it
# (requires a CUDA device).
if __name__ == "__main__1":
    net = ConvMatchHead().cuda()
    des = (torch.randint(0, 10, (4, 128, 40, 40))/10).float().cuda()
    res = net(des)
    print(res.size())


# Smoke test for SwinMatchHead on random 9-channel 256x256 input.
# NOTE(review): the guard compares against "__main__1", which never matches,
# so this block is deliberately disabled; rename to "__main__" to run it
# (requires CUDA device index 1).
if __name__ == "__main__1":
    device = torch.device("cuda:1")
    net = SwinMatchHead().to(device)
    des = (torch.randint(0, 10, (4, 9, 256, 256))/10).float().to(device)
    res = net(des)
    print(res.size())


# Active smoke test: run CustomConvMatchHead on random 128-channel 256x256
# input and print the descriptor shape (requires CUDA device index 0).
if __name__ == "__main__":
    device = torch.device("cuda:0")
    net = CustomConvMatchHead().to(device)
    des = (torch.randint(0, 10, (4, 128, 256, 256))/10).float().to(device)
    res = net(des)
    print(res.size())
