import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from scipy import stats
from copy import deepcopy

class classfiy_net(nn.Module):
    """Two-layer MLP classifier.

    forward() returns a pair (logits, embedding) where the embedding is the
    post-ReLU hidden activation — the representation BADGE samples over.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(classfiy_net, self).__init__()
        # Layers are moved to the GPU eagerly; inputs are .cuda()'d in forward.
        self.fc1 = nn.Linear(input_size, hidden_size).cuda()
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes).cuda()

    def forward(self, x):
        # Flatten everything after the batch dimension and move to the GPU.
        flat = x.view(x.size(0), -1).cuda()
        hidden = self.relu(self.fc1(flat))
        logits = self.fc2(hidden)
        return logits, hidden

def distance(X1, X2, mu):
    """Distance from every candidate pair (X1, X2) to one center `mu`.

    Each of X1 / X2 is a (vectors, squared_norms) tuple over all candidates,
    and mu = ((Y1_vec, Y1_norm_sq), (Y2_vec, Y2_norm_sq)) is a single chosen
    point. The product form implements the factorized (Kronecker-style)
    distance used by the fast BADGE variant.
    """
    (y1_vec, y1_sq), (y2_vec, y2_sq) = mu
    x1_vec, x1_sq = X1
    x2_vec, x2_sq = X2
    cross = (x1_vec @ y1_vec) * (x2_vec @ y2_vec)
    sq_dist = x1_sq * x2_sq + y1_sq * y2_sq - 2 * cross
    # Floating-point error can push tiny squared distances below zero; clamp
    # before taking the square root.
    return np.sqrt(np.clip(sq_dist, a_min=0, a_max=None))


def init_centers(X1, X2, chosen, chosen_list, mu, D2):
    """One step of k-means++-style seeding over the factorized BADGE space.

    On the first call (empty `chosen`) the point maximizing the product of the
    two squared norms is picked greedily; on later calls a point is sampled
    with probability proportional to its squared distance to the nearest
    already-chosen center. Returns the updated (chosen, chosen_list, mu, D2).
    """
    if not chosen:
        # Greedy first center: largest product of the two squared norms.
        ind = np.argmax(X1[1] * X2[1])
        mu = [((X1[0][ind], X1[1][ind]), (X2[0][ind], X2[1][ind]))]
        D2 = distance(X1, X2, mu[0]).ravel().astype(float)
        D2[ind] = 0
    else:
        # Fold the newest center into each point's nearest-center distance.
        newest = distance(X1, X2, mu[-1]).ravel().astype(float)
        D2 = np.minimum(D2, newest)
        D2 = np.maximum(D2, 1e-10)
        D2[chosen_list] = 0
        weights = (D2 ** 2) / sum(D2 ** 2)
        sampler = stats.rv_discrete(name='custm', values=(np.arange(len(weights)), weights))
        ind = sampler.rvs(size=1)[0]
        # Re-draw until we land on an index not already selected.
        while ind in chosen:
            ind = sampler.rvs(size=1)[0]
        mu.append(((X1[0][ind], X1[1][ind]), (X2[0][ind], X2[1][ind])))
    chosen.add(ind)
    chosen_list.append(ind)
    return chosen, chosen_list, mu, D2

class BadgeSampling(nn.Module):
    """BADGE active-learning sampler.

    Wraps a small MLP classifier and selects a batch of unlabeled points via
    k-means++ seeding on a factorized gradient-embedding space (speedup by
    Zhang et al., Appendix D of https://arxiv.org/abs/2306.09910).
    """

    def __init__(self, opt):
        super(BadgeSampling, self).__init__()
        self.opt = opt
        # Input is a flattened RGB image: 3 * photo_size^2 features.
        self.clf = classfiy_net(input_size=int(3 * opt.photo_size ** 2),
                                hidden_size=opt.hidden_dim,
                                num_classes=opt.num_classes)

    def get_embedding(self, X, Y, return_probs=False):
        """Return hidden-layer embeddings for X (and softmax probs if asked).

        Y is accepted for interface compatibility but is not needed to compute
        embeddings. Results are returned as CPU tensors.
        """
        self.clf.eval()
        X = torch.tensor(X).cuda()
        with torch.no_grad():
            out, e1 = self.clf(X)
            embedding = e1.cpu()
            if return_probs:
                probs = F.softmax(out, dim=1).cpu()
                return embedding, probs
        return embedding

    def train(self, X, Y):
        """Fit the classifier on (X, Y) with full-batch Adam.

        NOTE(review): this overrides nn.Module.train(mode=True); any caller
        relying on the standard train/eval mode toggle on BadgeSampling itself
        will invoke this instead — consider renaming in a future API revision.
        """
        optimizer = torch.optim.Adam(self.clf.parameters(), lr=self.opt.lr, weight_decay=0)
        X, Y = torch.tensor(X).cuda(), torch.tensor(Y).cuda()
        for epoch in range(self.opt.AL_epochs):
            self.clf.train()
            optimizer.zero_grad()
            out, _ = self.clf(X)
            loss = F.cross_entropy(out, Y)
            loss.backward()
            # Clamp gradients elementwise to [-0.1, 0.1], just in case.
            for p in self.clf.parameters():
                if p.grad is not None:
                    p.grad.data.clamp_(min=-.1, max=.1)
            optimizer.step()

    def query(self, n, X, Y):
        """Select n pool indices (pool size = opt.singleCapacity) via BADGE.

        The k-means++ distance over the BADGE gradient embedding factorizes
        into a (probs, embs) product, so only the two factor matrices and
        their per-row squared norms are kept.
        """
        idxs_unlabeled = np.arange(self.opt.singleCapacity)
        embs, probs = self.get_embedding(X, Y, return_probs=True)
        embs = embs.numpy()
        probs = probs.numpy()

        # assumes len(probs) == opt.singleCapacity — TODO confirm at call site
        m = self.opt.singleCapacity
        mu = None
        D2 = None
        chosen = set()
        chosen_list = []
        emb_norms_square = np.sum(embs ** 2, axis=-1)
        max_inds = np.argmax(probs, axis=-1)

        # Gradient factor: one_hot(argmax) - probs (the "hallucinated label"
        # gradient of the cross-entropy loss w.r.t. the last layer).
        probs = -1 * probs
        probs[np.arange(m), max_inds] += 1
        prob_norms_square = np.sum(probs ** 2, axis=-1)
        for _ in range(n):
            chosen, chosen_list, mu, D2 = init_centers(
                (probs, prob_norms_square), (embs, emb_norms_square),
                chosen, chosen_list, mu, D2)
        return idxs_unlabeled[chosen_list]