import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter

# Detect GPU availability instead of hardcoding cuda=True, which crashed on
# CPU-only hosts as soon as any tensor was moved to `device`.
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")


def gem(x, p=3, eps=1e-6):
    """Generalized-mean (GeM) pooling over the spatial dims of a 4-D tensor.

    Clamps activations to at least `eps` (so the p-th root is well defined),
    raises them to the power `p`, averages over the full H x W window, and
    takes the p-th root. Returns a tensor of shape (N, C, 1, 1).
    """
    clamped = x.clamp(min=eps)
    pooled = F.avg_pool2d(clamped.pow(p), (x.size(-2), x.size(-1)))
    return pooled.pow(1.0 / p)


class GeM(nn.Module):
    """Generalized-mean pooling layer with a learnable exponent `p`."""

    def __init__(self, p=3, eps=1e-6):
        super().__init__()
        # The pooling exponent is a Parameter so it is learned with the network.
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        return "{}(p={:.4f}, eps={})".format(
            self.__class__.__name__, self.p.data.tolist()[0], self.eps
        )


class Flatten(torch.nn.Module):
    """Squeeze a (N, C, 1, 1) tensor down to (N, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Only defined for feature maps already pooled to a single spatial cell.
        h, w = x.shape[2], x.shape[3]
        assert h == w == 1, f"{h} != {w} != 1"
        return x[:, :, 0, 0]


class L2Norm(nn.Module):
    """L2-normalize the input along a configurable dimension (default 1)."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        normalized = F.normalize(x, p=2, dim=self.dim)
        return normalized


class Convs(nn.Module):
    """A single 7x20 convolution + ReLU with weights shared between the
    ground and satellite branches.

    Args:
        in_c: number of input channels.
        out_c: number of output channels.
    """

    def __init__(self, in_c, out_c,):
        super().__init__()
        self.out_c = out_c
        self.in_c = in_c
        self.relu = nn.ReLU()

        # Build the weight/bias directly as Parameters. torch.empty + init
        # replaces the deprecated uninitialized torch.Tensor(...) constructor,
        # and nn.Parameter already implies requires_grad=True, so the former
        # .to(device)/requires_grad/re-wrap sequence collapses to this.
        w = torch.empty(out_c, in_c, 7, 20, device=device)
        b = torch.empty(out_c, device=device)
        nn.init.normal_(w)
        nn.init.normal_(b)
        self.w = nn.Parameter(w)
        self.b = nn.Parameter(b)

    def forward(self, grd_x, sat_x):
        """Convolve both inputs with the shared kernel and apply ReLU.

        Returns the (ground, satellite) activations as a tuple.
        """
        return (
            self.relu(F.conv2d(grd_x, self.w, self.b, stride=1, padding=0)),
            self.relu(F.conv2d(sat_x, self.w, self.b, stride=1, padding=0)),
        )


class NN(nn.Module):
    """Two-layer MLP (128 -> 256 -> 128) with dropout, applied with shared
    weights to a 2-D ground descriptor and a 3-D satellite descriptor, then
    L2-normalized.
    """

    @staticmethod
    def _make_param(*shape):
        # One-stop parameter factory: torch.empty + normal init as a Parameter.
        # Replaces the repeated torch.Tensor(...)/.to(device)/requires_grad/
        # nn.Parameter sequence (nn.Parameter already implies requires_grad).
        t = torch.empty(*shape, device=device)
        nn.init.normal_(t)
        return nn.Parameter(t)

    def __init__(self):
        super(NN, self).__init__()
        # Same creation/init order as before, so the RNG stream is unchanged.
        self.w1 = self._make_param(128, 256)
        self.b1 = self._make_param(1, 256, 1)
        self.w2 = self._make_param(256, 128)
        self.b2 = self._make_param(1, 128, 1)
        self.drop = nn.Dropout(p=0.2)

    def forward(self, grd_x, sat_x):
        """Run both descriptors through the shared two-layer MLP.

        Args:
            grd_x: ground features, assumed (N, 128) — TODO confirm with caller.
            sat_x: satellite features, assumed (M, 128, K) — TODO confirm.

        Returns:
            (grd_out, sat_out) with 128-dim features, L2-normalized.
        """
        grd_x = torch.einsum("ij,jk->ik", [grd_x, self.w1]) + torch.squeeze(self.b1, 2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w1) + self.b1
        grd_x = F.relu(grd_x)
        sat_x = F.relu(sat_x)
        grd_x = self.drop(grd_x)
        sat_x = self.drop(sat_x)
        grd_x = torch.einsum("ij,jk->ik", [grd_x, self.w2]) + torch.squeeze(self.b2, 2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w2) + self.b2
        # NOTE(review): ground is normalized along dim=0 (across the batch)
        # while satellite uses dim=1 (feature dim) — asymmetric; confirm this
        # is intentional before changing.
        grd_x = F.normalize(grd_x, dim=0)
        sat_x = F.normalize(sat_x, dim=1)

        return grd_x, sat_x

class NNN(nn.Module):
    """Three-layer MLP (128 -> 256 -> 256 -> 128) applied with shared
    weights to a 2-D ground descriptor and a 3-D satellite descriptor, then
    L2-normalized. Deeper variant of NN, without dropout.
    """

    @staticmethod
    def _make_param(*shape):
        # One-stop parameter factory: torch.empty + normal init as a Parameter.
        # Replaces the repeated torch.Tensor(...)/.to(device)/requires_grad/
        # nn.Parameter sequence (nn.Parameter already implies requires_grad).
        t = torch.empty(*shape, device=device)
        nn.init.normal_(t)
        return nn.Parameter(t)

    def __init__(self):
        super(NNN, self).__init__()
        # Same creation/init order as before, so the RNG stream is unchanged.
        self.w1 = self._make_param(128, 256)
        self.b1 = self._make_param(1, 256, 1)
        self.w2 = self._make_param(256, 256)
        self.b2 = self._make_param(1, 256, 1)
        self.w3 = self._make_param(256, 128)
        self.b3 = self._make_param(1, 128, 1)

    def forward(self, grd_x, sat_x):
        """Run both descriptors through the shared three-layer MLP.

        Args:
            grd_x: ground features, assumed (N, 128) — TODO confirm with caller.
            sat_x: satellite features, assumed (M, 128, K) — TODO confirm.

        Returns:
            (grd_out, sat_out) with 128-dim features, L2-normalized.
        """
        grd_x = torch.einsum("ij,jk->ik", [grd_x, self.w1]) + torch.squeeze(self.b1, 2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w1) + self.b1
        grd_x = F.relu(grd_x)
        sat_x = F.relu(sat_x)
        grd_x = torch.einsum("ij,jk->ik", [grd_x, self.w2]) + torch.squeeze(self.b2, 2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w2) + self.b2
        grd_x = F.relu(grd_x)
        sat_x = F.relu(sat_x)
        grd_x = torch.einsum("ij,jk->ik", [grd_x, self.w3]) + torch.squeeze(self.b3, 2)
        sat_x = torch.einsum("ijk,jl->ilk", sat_x, self.w3) + self.b3
        # NOTE(review): ground normalized along dim=0 (batch), satellite along
        # dim=1 (features) — asymmetric; confirm intentional (matches NN).
        grd_x = F.normalize(grd_x, dim=0)
        sat_x = F.normalize(sat_x, dim=1)

        return grd_x, sat_x

if __name__ == '__main__':
    # Smoke-test GeM-style pooling with a learnable exponent.
    p = Parameter(torch.ones(1) * 3)
    x = torch.rand(10, 45, 78)
    res = F.avg_pool2d(x.clamp(min=0.1).pow(p), (x.size(-2), x.size(-1))).pow(1. / p)
    print(res.shape)

    # Demonstrate splitting a shuffled index list into fixed-size batches.
    shuffle = np.array(range(100))
    np.random.shuffle(shuffle)
    shuffle = torch.tensor(shuffle)
    batch = 16
    # Use `batch` (was a hardcoded 16) and drop the remainder so the indices
    # reshape cleanly. Guard num == 0: shuffle[:-0] would be an EMPTY tensor.
    num = shuffle.shape[0] % batch
    if num:
        shuffle = shuffle[:-num]
    shuffle = shuffle.reshape(-1, batch)
    print(shuffle)
    angle = np.array(range(100))
    for i in shuffle:
        # A Python list cannot be indexed by a multi-element tensor (the old
        # `list(angle)[i]` raised TypeError); index the NumPy array instead.
        print(angle[i.numpy()])
        print('...........')

