import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter

# Select the GPU only when one is actually available; hard-coding `cuda = True`
# crashes on CPU-only machines as soon as anything is moved to the device.
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")

def gem(x, p=3, eps=1e-6):
    """Generalized-mean (GeM) pooling over the two trailing spatial dims.

    Clamps activations to at least `eps` (so the p-th root is well defined),
    raises them to the power `p`, averages over the full H x W window, then
    takes the p-th root. For a (B, C, H, W) input the result is (B, C, 1, 1).
    """
    powered = x.clamp(min=eps).pow(p)
    pooled = F.avg_pool2d(powered, (x.size(-2), x.size(-1)))
    return pooled.pow(1.0 / p)


class GeM(nn.Module):
    """Generalized-mean pooling layer with a learnable exponent `p`."""

    def __init__(self, p=3, eps=1e-6):
        super().__init__()
        # p is trained jointly with the rest of the network.
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        # GeM pooling inlined: clamp, power, spatial average, inverse power.
        powered = x.clamp(min=self.eps).pow(self.p)
        pooled = F.avg_pool2d(powered, (x.size(-2), x.size(-1)))
        return pooled.pow(1.0 / self.p)

    def __repr__(self):
        p_value = self.p.data.tolist()[0]
        return '{}(p={:.4f}, eps={})'.format(self.__class__.__name__, p_value, self.eps)


class Flatten(torch.nn.Module):
    """Squeeze a (B, C, 1, 1) tensor down to (B, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would silently let mis-shaped inputs through.
        if not (x.shape[2] == x.shape[3] == 1):
            raise ValueError(
                f"expected trailing spatial dims of size 1, got {x.shape[2]}x{x.shape[3]}")
        return x[:, :, 0, 0]


class L2Norm(nn.Module):
    """L2-normalize the input along one dimension (channel dim by default)."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # x / max(||x||_2, 1e-12), matching F.normalize's default behavior
        # (the clamp keeps all-zero slices from dividing by zero).
        denom = x.norm(2, self.dim, keepdim=True).clamp_min(1e-12)
        return x / denom


class Convs(nn.Module):
    """Shared 7x20 convolution + ReLU applied to two input branches.

    One weight/bias pair is used for both inputs, so the two branches
    (named grd/sat in `forward`) are projected through identical filters.
    """

    def __init__(self, in_c, out_c):
        """in_c / out_c: input and output channel counts of the shared conv."""
        super().__init__()
        self.out_c = out_c
        self.in_c = in_c
        self.relu = nn.ReLU()
        # NOTE(review): self.drop is registered but never applied in forward;
        # kept for interface/state-dict compatibility.
        self.drop = nn.Dropout(0.2)

        # Register the shared filter bank directly as Parameters.
        # torch.empty + init replaces the legacy uninitialized torch.Tensor()
        # constructor, and dropping the hard-coded `.to(device)` calls leaves
        # placement to the standard `module.to(device)` mechanism (the old
        # code crashed at construction time on CPU-only machines).
        self.w = nn.Parameter(torch.empty(self.out_c, self.in_c, 7, 20))
        self.b = nn.Parameter(torch.empty(self.out_c))
        nn.init.normal_(self.w)
        nn.init.normal_(self.b)

    def forward(self, grd_x, sat_x):
        """Convolve both inputs with the shared weights and apply ReLU.

        Returns the tuple (ground_out, satellite_out).
        """
        grd_out = self.relu(F.conv2d(grd_x, self.w, self.b, stride=1, padding=0))
        sat_out = self.relu(F.conv2d(sat_x, self.w, self.b, stride=1, padding=0))
        return grd_out, sat_out


class NN(nn.Module):
    """Two-layer MLP (128 -> 256 -> 128, ReLU throughout) shared by two branches.

    Per the einsum subscripts in `forward`: the grd branch takes a single 1-D
    descriptor of size 128; the sat branch takes a (batch, 128, k) tensor and
    is transformed position-wise along its middle dimension with the same
    weights.
    """

    def __init__(self):
        super().__init__()
        # Parameters are registered directly via nn.Parameter(torch.empty(...))
        # instead of the legacy uninitialized torch.Tensor() constructor plus
        # manual `.to(device)` / `requires_grad` juggling; device placement is
        # left to the caller's `module.to(device)` (the old hard-coded global
        # device crashed on CPU-only machines).
        self.w1 = nn.Parameter(torch.empty(128, 256))
        self.b1 = nn.Parameter(torch.empty(1, 256, 1))
        nn.init.normal_(self.w1)
        nn.init.normal_(self.b1)

        self.w2 = nn.Parameter(torch.empty(256, 128))
        self.b2 = nn.Parameter(torch.empty(1, 128, 1))
        nn.init.normal_(self.w2)
        nn.init.normal_(self.b2)

    def forward(self, grd_x, sat_x):
        """Apply the shared two-layer MLP to both inputs.

        grd_x: (128,) vector -> (128,) vector.
        sat_x: (batch, 128, k) -> (batch, 128, k).
        """
        # Layer 1: project 128 -> 256 (bias is squeezed to (256,) for the 1-D
        # grd path, broadcast as (1, 256, 1) for the batched sat path).
        grd_x = torch.einsum("j,jk->k", grd_x, self.w1) + torch.squeeze(self.b1)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w1) + self.b1
        grd_x = F.relu(grd_x)
        sat_x = F.relu(sat_x)

        # Layer 2: project 256 -> 128, ReLU on the way out.
        grd_x = torch.einsum("j,jk->k", grd_x, self.w2) + torch.squeeze(self.b2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w2) + self.b2
        return F.relu(grd_x), F.relu(sat_x)


