import argparse
import torch
from torch import nn
import numpy as np
from sklearn.metrics import average_precision_score, accuracy_score
from itertools import cycle
from utils import *

def MIM_loss(feats1, feats2, temp=0.5):
    """NT-Xent-style contrastive (mutual-information-maximization) loss.

    Row i of ``feats1`` and row i of ``feats2`` are treated as a positive
    pair; every other row of the combined 2N-row batch serves as a negative.

    Args:
        feats1: (N, D) feature batch.
        feats2: (N, D) feature batch, row-aligned with ``feats1``.
        temp: softmax temperature dividing all similarities.

    Returns:
        Scalar tensor: mean over the 2N rows of -log(pos / sum(negatives)).
    """
    # L2-normalize each row so that dot products become cosine similarities.
    feats1_unit = torch.div(feats1, torch.norm(feats1, dim=1).reshape(-1, 1))
    feats2_unit = torch.div(feats2, torch.norm(feats2, dim=1).reshape(-1, 1))

    # Stack both views; feats21 aligns each row of feats12 with its positive.
    feats12 = torch.cat((feats1_unit, feats2_unit))
    feats21 = torch.cat((feats2_unit, feats1_unit))

    # Pairwise similarities among the 2N rows, temperature-scaled.
    exp_sim_by_tau = torch.exp(torch.div(torch.mm(feats12, feats12.T), temp))

    # Denominator per row: all similarities except the row's self-similarity.
    denominators = torch.sum(exp_sim_by_tau, dim=1) - torch.diag(exp_sim_by_tau)

    # Numerator per row: similarity with its positive counterpart.
    numerators = torch.exp(torch.div(torch.nn.CosineSimilarity()(feats12, feats21), temp))

    return torch.mean(-torch.log(torch.div(numerators, denominators)))


class Trainer:
    """Runs reconstruction pre-training with optional data mixup and
    model-mixup knowledge distillation (KD).

    Args:
        model_dict: dict of models; key 'train' is the model being trained,
            optional key 'KD_encoder' is the frozen teacher encoder.
        device: torch device the batches are moved to.
        model_mixup_KD: None, 'beta', 'unif', or a float-like value — how
            to draw the coefficient mixing the live backbone with the KD
            encoder. None disables the KD loss term.
        data_mixup: None, 'beta', 'unif', or a float-like value — how to
            draw the coefficient mixing task images with memory images.
            None disables data mixup.
        KD_hyperparam: weight of the KD (MIM) loss term.
        patch_size: patch size forwarded to get_old_encoder.
    """

    def __init__(self, model_dict, device, model_mixup_KD, data_mixup, KD_hyperparam=0.1, patch_size=16):
        self.models = model_dict
        self.criterion = nn.MSELoss()
        self.device = device
        self.alpha = 0.4
        self.model_mixup_KD = model_mixup_KD
        self.data_mixup = data_mixup
        self.KD_hyperparam = KD_hyperparam
        self.patch_size = patch_size

        # Spare encoder that holds the convex combination of the current
        # backbone and the KD encoder; only needed when model-mixup KD is on.
        if model_mixup_KD is not None:
            self.convex_encoder = get_old_encoder(model=model_dict['train'], device=device, patch_size=self.patch_size)

    def _mixup_lambda(self, spec):
        """Resolve a mixup spec ('beta', 'unif', or a float-like value)
        into a mixing coefficient.

        Raises:
            argparse.ArgumentTypeError: if ``spec`` is none of the accepted
                forms.
        """
        if spec == 'beta':
            return np.random.beta(0.4, 0.4)
        if spec == 'unif':
            return np.random.uniform(0, 1)
        try:
            return float(spec)
        except (TypeError, ValueError):
            raise argparse.ArgumentTypeError(f"Invalid value: {spec}. Must be None, 'beta', 'unif', or a float.")

    def train_epoch(self, dataloaders, optimizer, print_loss=False):
        """Train for one epoch and return the average per-batch loss.

        Args:
            dataloaders: dict with key 'train' (and 'memory' when data
                mixup is enabled; the memory loader is cycled).
            optimizer: optimizer stepped once per batch.
            print_loss: if True, print the running loss every 10 batches.
        """
        self.models['train'].train()
        if 'KD_encoder' in self.models:
            self.models['KD_encoder'].train()
        running_loss = 0.0
        epoch_loss = 0.0

        if self.data_mixup is None:
            batches = dataloaders['train']
        else:
            batches = zip(dataloaders['train'], cycle(dataloaders['memory']))

        for i, batch in enumerate(batches):
            if self.data_mixup is None:
                task_batch = batch
                images = task_batch[0][0].to(self.device)
                model_output = self.models['train'](images)
            else:
                task_batch, memory_batch = batch
                images = task_batch[0][0].to(self.device)
                memory_images = memory_batch[0][0].to(self.device)

                lam = self._mixup_lambda(self.data_mixup)

                # Loaders may yield unequal batch sizes; mix the overlap only.
                batch_size = min(images.shape[0], memory_images.shape[0])
                mixed_images = lam * images[:batch_size] + (1 - lam) * memory_images[:batch_size]

                model_output = self.models['train'](mixed_images)

            predictions, targets = model_output[0], model_output[1]
            loss = self.criterion(predictions, targets)

            if self.model_mixup_KD is not None:
                # Second view of the batch feeds the KD branch.
                images_KD = task_batch[0][1].to(self.device)
                if self.data_mixup is not None:
                    memory_images_KD = memory_batch[0][1].to(self.device)
                    batch_size = min(images_KD.shape[0], memory_images_KD.shape[0])
                    # Reuse the same lam as the task images so both views mix
                    # identically.
                    images_KD = lam * images_KD[:batch_size] + (1 - lam) * memory_images_KD[:batch_size]

                # BUG FIX: the float fall-through previously assigned
                # `lam = float(self.data_mixup)` (leaving lam2 unbound and
                # parsing the wrong setting); it must derive lam2 from
                # self.model_mixup_KD.
                lam2 = self._mixup_lambda(self.model_mixup_KD)

                with torch.no_grad():
                    # Teacher = convex combination of live backbone and KD
                    # encoder, weighted by lam2.
                    for model_convex, model, KDencoder in zip(self.convex_encoder.parameters(), self.models['train'].backbone.parameters(), self.models['KD_encoder'].parameters()):
                        model_convex.data = model.data * lam2 + KDencoder.data * (1.0 - lam2)
                    encodings_KD = self.convex_encoder(images_KD)

                    # Restore the spare encoder to the KD encoder's weights.
                    self.convex_encoder.load_state_dict(self.models['KD_encoder'].state_dict())

                    # CLS-token features — assumes encoder output dict with
                    # key 'encoder' of shape (B, tokens, dim); TODO confirm.
                    feature_encs_KD = encodings_KD['encoder'][:, 0]

                projections = model_output[3]
                mim_loss = MIM_loss(projections, feature_encs_KD.detach())
                loss = loss + self.KD_hyperparam * mim_loss

            running_loss += loss.item()
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if print_loss and (i % 10 == 9):  # print every 10 mini-batches
                print(f"[{i + 1:5d}] loss: {running_loss / 10:.4f}")
                running_loss = 0.0

        return epoch_loss / len(dataloaders['train'])

    def test(self, dataloader):
        """Evaluate the 'train' model; return a list of per-batch loss dicts."""
        self.models['train'].eval()
        with torch.no_grad():
            running_loss = 0.0
            output_list = []
            for batch in dataloader:
                views = batch[0]
                images = views.to(self.device)  # views contains only a single view
                model_outputs = self.models['train'](images)
                predictions, targets = model_outputs[0], model_outputs[1]
                loss = self.criterion(predictions, targets)
                output_list += [{"loss": loss.item()}]
                running_loss += loss.item()
        return output_list

    def train(self, dataloaders, optimizer, scheduler, writer=None, eval=10, num_epochs=1, print_loss=True, best_eval=False, start_eval=0, exp_name='Test', trial=0):
        """Full training loop with periodic validation and checkpointing.

        Args:
            dataloaders: dict with 'train' (and 'val' when eval is set).
            optimizer: optimizer passed to train_epoch.
            scheduler: LR scheduler stepped once per epoch (or None).
            writer: optional tensorboard-style writer for scalars.
            eval: validate every ``eval`` epochs (None disables validation).
                NOTE(review): shadows the builtin, kept for caller
                compatibility.
            num_epochs: number of training epochs.
            print_loss: forwarded to train_epoch.
            best_eval: if True, checkpoint whenever validation loss improves.
            start_eval: first epoch at which validation may run.
            exp_name, trial: used to name the checkpoint file.
        """
        print('>> Start Training...')
        min_loss = float('inf')
        for epoch in range(num_epochs):
            avg_epoch_loss = self.train_epoch(dataloaders, optimizer, print_loss)
            if scheduler is not None:
                scheduler.step()
            if writer is not None:
                writer.add_scalar('train loss', avg_epoch_loss, epoch)
            if eval is not None and epoch >= start_eval and epoch % eval == 0:
                print('Eval...')
                val_output = self.test(dataloaders['val'])
                metrics = {"avg_loss": np.stack([x["loss"] for x in val_output], 0).mean()}
                if best_eval and min_loss > metrics['avg_loss']:
                    min_loss = metrics['avg_loss']
                    torch.save({'epoch': epoch,
                                'model_state_dict': self.models['train'].state_dict(),
                                'loss': min_loss,
                                }, '{}_pretraining_trial_{}_best.pt'.format(exp_name, trial))
                if writer is not None:
                    for metric, value in metrics.items():
                        # classwise AP is an array; scalar writers reject it
                        if metric != 'ap_classwise':
                            writer.add_scalar(metric, value, epoch)
        if writer is not None:
            writer.flush()
            writer.close()
        print('>> Finished.')


def get_metrics(outputs, criterion):
    """Aggregate evaluation metrics from per-batch output dicts.

    Args:
        outputs: list of dicts with keys "loss", "outputs", "labels".
        criterion: the loss function used; dispatch is on its ``__name__``
            ('binary_cross_entropy_with_logits' → multi-label AP metrics,
            'cross_entropy' → accuracy).

    Returns:
        Dict of metrics for a recognized criterion, otherwise None.
    """
    loss_name = criterion.__name__

    def _mean_loss():
        # Average the per-batch losses.
        return np.stack([o["loss"] for o in outputs], 0).mean()

    if loss_name == 'binary_cross_entropy_with_logits':
        probs = np.concatenate([o["outputs"].cpu().sigmoid() for o in outputs], 0)
        truth = np.concatenate([o["labels"].cpu() for o in outputs], 0)
        return {
            "avg_loss": _mean_loss(),
            "map_macro": average_precision_score(truth, probs, average='macro'),
            "map_micro": average_precision_score(truth, probs, average='micro'),
            "ap_classwise": average_precision_score(truth, probs, average=None),
        }

    if loss_name == 'cross_entropy':
        preds = np.concatenate([torch.argmax(o["outputs"].cpu(), dim=1) for o in outputs], 0)
        truth = np.concatenate([o["labels"].cpu() for o in outputs], 0)
        return {
            "avg_loss": _mean_loss(),
            "acc": accuracy_score(truth, preds),
        }



def eval_model(
    model: torch.nn.Module,
    dl: torch.utils.data.DataLoader,
    loss_fn,
    device: torch.device,
    classes: int,
    ):
    """Evaluate ``model`` on ``dl`` and return aggregated metrics.

    Args:
        model: model producing logits for ``loss_fn``.
        dl: dataloader yielding (image, label) batches.
        loss_fn: criterion; also passed to get_metrics for dispatch.
        device: device the batches are moved to.
        classes: unused; kept for caller compatibility.

    Returns:
        The metrics dict produced by get_metrics.
    """
    model.eval()
    with torch.no_grad():
        output_list = []
        for batch in dl:
            img, label = batch
            img = img.to(device)
            label = label.to(device)

            output = model(img)
            loss = loss_fn(output, label)
            # Store the scalar loss (consistent with Trainer.test; np.stack
            # inside get_metrics would fail on device tensors).
            output_list += [{"loss": loss.item(), "outputs": output, "labels": label}]
    # BUG FIX: get_metrics takes (outputs, criterion); the criterion
    # argument was previously omitted, raising a TypeError at runtime.
    metrics = get_metrics(output_list, loss_fn)
    return metrics