#%%
import torch
import torch.nn as nn
import torch.nn.functional as F

import os
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
from matplotlib import pyplot as plt
import seaborn as sns
import json
import pathlib
from pathlib import Path

#%%
class NetVLAD(nn.Module):
    """NetVLAD layer: soft-assigns local descriptors to learned cluster
    centroids and aggregates descriptor-to-centroid residuals into a
    (num_cluster x dim) VLAD descriptor per sample.
    """

    def __init__(self, num_cluster=64, dim=512, alpha=100.0, normalize_input=True):
        """
        Args:
            num_cluster: number of VLAD clusters K.
            dim: dimension D of each local descriptor (input channels).
            alpha: initial assignment softness; overwritten by
                ``_init_params`` once real cluster statistics are available.
            normalize_input: if True, L2-normalize descriptors along the
                channel dimension before soft assignment.
        """
        super().__init__()
        self.num_cluster = num_cluster
        self.dim = dim
        self.alpha = alpha
        self.normalize_input = normalize_input
        # 1x1 conv producing per-location cluster scores; bias-free so the
        # scores are a pure dot product with the (scaled) cluster directions.
        self.conv = nn.Conv2d(dim, num_cluster, kernel_size=(1, 1), bias=False)
        self.centroids = nn.Parameter(torch.rand(num_cluster, dim), requires_grad=True)

        # Set externally (e.g. k-means cluster centers and a sample of
        # training descriptors) before calling `_init_params`.
        self.clsts = None
        self.traindescs = None

    def _init_params(self):
        """Initialize centroids and conv weights from precomputed clusters.

        Expects ``self.clsts`` (K x D cluster centers) and
        ``self.traindescs`` (N x D training descriptors) to be set.
        Chooses alpha so that the second-best assignment score is ~0.01 of
        the best on the training descriptors (original NetVLAD recipe).
        """
        # L2-normalize each cluster center (row-wise 2-norm, keeping 2-D shape).
        clstsAssign = self.clsts / np.linalg.norm(self.clsts, axis=1, keepdims=True)
        dots = np.dot(clstsAssign, self.traindescs.T)
        dots.sort(0)
        dots = dots[::-1, :]  # sort descending
        self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
        self.centroids.data.copy_(torch.from_numpy(self.clsts))
        self.conv.weight.data.copy_(
            torch.from_numpy(self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3)
        )

    def forward(self, x):
        """Compute the (un-normalized) VLAD descriptor.

        Args:
            x: feature map of shape (N, C, H, W) with C == self.dim.

        Returns:
            Tensor of shape (N, num_cluster, C); callers are expected to
            apply intra- and global L2-normalization afterwards.
        """
        N, C = x.shape[:2]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # L2-normalize each descriptor

        # Soft assignment: per-location probability of each cluster.
        soft_assign = self.conv(x).view(N, self.num_cluster, -1)
        soft_assign = F.softmax(soft_assign, dim=1)

        x_flatten = x.view(N, C, -1)

        # Residuals of every descriptor to every centroid in one broadcasted
        # subtraction: x is replicated K times to (N, K, C, HW) and the
        # centroids are expanded to match, so each element holds one residual.
        residual = x_flatten.expand(self.num_cluster, -1, -1, -1).permute(
            1, 0, 2, 3
        ) - self.centroids.expand(x_flatten.size(-1), -1, -1).permute(
            1, 2, 0
        ).unsqueeze(
            0
        )

        # Weight residuals by their soft assignment and sum over locations.
        residual *= soft_assign.unsqueeze(2)
        vlad = residual.sum(dim=-1)

        return vlad


class EmbedNet(nn.Module):
    """Backbone + NetVLAD head, no dimensionality reduction.

    NOTE(review): this class is re-defined later in the same file (with an
    added PCA projection); at module level that later definition shadows
    this one, so this version is effectively dead code — confirm intent.
    """

    def __init__(self, base_model, net_vlad):
        super(EmbedNet, self).__init__()
        self.base_model = base_model
        self.net_vlad = net_vlad

    def _init_params(self):
        # Delegate parameter initialization to both sub-modules.
        self.base_model._init_params()
        self.net_vlad._init_params()

    def forward(self, x):
        """Return (pooled backbone features, normalized VLAD embedding)."""
        pool_x, feat = self.base_model(x)
        vlad = self.net_vlad(feat)

        # Intra-normalize each cluster's residual sum, flatten to one vector
        # per sample, then L2-normalize the whole descriptor.
        vlad = F.normalize(vlad, p=2, dim=2)
        vlad = F.normalize(vlad.view(feat.size(0), -1), p=2, dim=1)

        return pool_x, vlad


class EmbedNet(nn.Module):
    """Backbone + NetVLAD head followed by a learned 1x1-conv ("PCA")
    projection that reduces the K*D VLAD descriptor to `dim` components.
    """

    def __init__(self, base_model, net_vlad, dim=4096):
        super(EmbedNet, self).__init__()
        self.base_model = base_model
        self.net_vlad = net_vlad
        # 1x1 conv acting as a linear projection from K*D down to `dim`.
        self.pca_layer = nn.Conv2d(
            net_vlad.num_cluster * net_vlad.dim, dim, 1, stride=1, padding=0
        )

    def _init_params(self):
        # Delegate parameter initialization to both sub-modules.
        self.base_model._init_params()
        self.net_vlad._init_params()

    def forward(self, x):
        """Return the L2-normalized, dimensionality-reduced VLAD embedding."""
        _, feat = self.base_model(x)
        vlad = self.net_vlad(feat)

        # [IMPORTANT] intra-normalize per cluster, flatten, then L2-normalize.
        vlad = F.normalize(vlad, p=2, dim=2)
        vlad = vlad.reshape(feat.size(0), -1)
        vlad = F.normalize(vlad, p=2, dim=1)

        # Projection: feed the flat descriptor through the 1x1 conv as a
        # (N, K*D, 1, 1) "image", then renormalize the reduced vector.
        batch, full_dim = vlad.size()
        reduced = self.pca_layer(vlad.view(batch, full_dim, 1, 1)).view(batch, -1)
        return F.normalize(reduced, p=2, dim=-1)
