import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Iterable
from torch import optim
from timm.utils import accuracy
import numpy as np
import sys

# NativeScaler is not used here because our backward pass differs from conventional deep-learning training.
# from timm.utils import NativeScaler

# Module-level shared state, mutated by train_one_epoch_frozen across epochs:
# - freeze_num_expert: list of [param_name, expert_id] pairs selected for
#   freezing; persists between calls (it is also the function's default arg).
# - tmp_model_para: snapshot of expert parameters taken before training, used
#   to measure per-expert weight drift via cosine similarity.
freeze_num_expert = []
tmp_model_para = {}
"""
Train and eval functions.
"""
def train_one_epoch_frozen(
    model: nn.Module,
    criterion: nn.Module,
    data_loader: Iterable,
    optimizer: optim.Optimizer,
    device: torch.device,
    expert_loss_coeff: float,
    freeze_num_expert: list = freeze_num_expert,
    expert_limit: int = 5,
):
    """Train for one epoch while freezing selected experts, then select the
    next expert to freeze based on how much its weights drifted.

    The module-level ``freeze_num_expert`` list is deliberately used as the
    default argument so the frozen set accumulates across epochs; the
    module-level ``tmp_model_para`` dict caches a pre-training snapshot of the
    expert weights so per-expert drift can be measured after the epoch.

    Args:
        model (nn.Module): model to train. Expert weights are expected to
            contain 'experts' in their parameter names, with paired weight
            names ending in '1'/'2' (e.g. w1/w2) — TODO confirm against the
            model definition.
        criterion (nn.Module): classification loss.
        data_loader (Iterable): yields (samples, targets) batches.
        optimizer (optim.Optimizer): optimizer over all model parameters.
        device (torch.device): device to train on.
        expert_loss_coeff (float): coefficient applied to the auxiliary
            expert (load-balancing) losses returned by the model.
        freeze_num_expert (list): [name, expert_id] pairs to freeze; mutated
            in place (oldest entry evicted at expert_limit, newest appended).
        expert_limit (int): maximum number of simultaneously frozen experts.

    Returns:
        tuple: (mean CE loss, mean expert loss) over the epoch.
    """
    # Evict the oldest frozen expert once the limit is reached.
    if len(freeze_num_expert) >= expert_limit:
        del freeze_num_expert[0]
    frozen_name = [x[0] for x in freeze_num_expert]
    frozen_expert = [x[1] for x in freeze_num_expert]

    # Switch to training mode.
    model.to(device)
    model.train()
    ce_losses = []
    expert_losses = []

    # Snapshot the expert parameters before training.  NOTE: clone() is
    # required — detach() alone returns a tensor sharing storage with the
    # live parameter, so the "before" snapshot would silently track the
    # in-place optimizer updates and every similarity below would be ~1.
    if len(tmp_model_para) == 0:
        for name, param in model.named_parameters():
            if 'experts' in name:
                tmp_model_para[name] = param.detach().clone()

    # Inner loop over batches.
    for samples, targets in data_loader:
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        # Mixed-precision forward pass (no GradScaler by design; see the
        # deprecation note at the top of the file).
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            if isinstance(outputs, tuple):
                logits, aux_losses = outputs
                ce_loss = criterion(logits, targets)
                expert_loss = sum(aux_losses) * expert_loss_coeff
                loss = ce_loss + expert_loss
            else:
                ce_loss = criterion(outputs, targets)
                loss = ce_loss
                expert_loss = torch.tensor(0)

        # Abort on a non-finite loss.
        loss_value = loss.item()
        if not np.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()
        # synchronize() raises on CPU-only builds, so only call it on CUDA.
        if device.type == "cuda":
            torch.cuda.synchronize()
        loss.backward()

        # Freeze the selected experts by zeroing their gradient slices.
        # This must happen AFTER backward() (zeroing before backward has no
        # effect) and must index into param.grad — indexing the parameter
        # itself yields a non-leaf view whose .grad is always None.
        for i in range(len(frozen_name)):
            for name, param in model.named_parameters():
                # frozen_name[i][:-1] strips the trailing '1'/'2' so both
                # paired expert weights (w1/w2) of the layer are frozen.
                if frozen_name[i][:-1] in name and param.grad is not None:
                    param.grad[frozen_expert[i]].zero_()

        optimizer.step()

        # Record the per-batch losses.
        ce_losses.append(ce_loss.item())
        expert_losses.append(expert_loss.item())

    # Compare the post-epoch weights with the snapshot and pick the expert
    # with the lowest average cosine similarity (i.e. the one whose paired
    # w1/w2 weights drifted the most) as the next one to freeze.
    similarity = 1
    similarity_index = []  # [parameter name, expert id]
    cos = torch.nn.CosineSimilarity(dim=0)
    state = model.state_dict()  # hoisted: state_dict() rebuilds a dict each call
    for key in tmp_model_para:
        # Iterate only the '...1' weights; the paired '...2' weight is
        # folded into the same score below.
        if 'w2' in key:
            continue
        for i in range(len(tmp_model_para[key])):
            tmp_similar = cos(tmp_model_para[key][i], state[key][i]).mean().item()
            # Name of the paired '...2' weight.
            keyy = key[:-1] + '2'
            tmp_similar += cos(tmp_model_para[keyy][i], state[keyy][i]).mean().item()
            tmp_similar = tmp_similar / 2
            if tmp_similar < similarity:
                similarity = tmp_similar
                similarity_index = [key, i]
    print(len(freeze_num_expert))
    if len(similarity_index) == 2:
        freeze_num_expert.append(similarity_index)
        # Refresh the snapshot (clone for the same aliasing reason as above).
        for name, param in model.named_parameters():
            if 'experts' in name:
                tmp_model_para[name] = param.detach().clone()

    return np.mean(ce_losses), np.mean(expert_losses)

def train_one_epoch(
    model: nn.Module,
    criterion: nn.Module,
    data_loader: Iterable,
    optimizer: optim.Optimizer,
    device: torch.device,
    expert_loss_coeff: float,
):
    """Train the model on the given data_loader for 1 epoch.

    Args:
        model (nn.Module): model to train; may return either plain logits or
            a (logits, aux_losses) tuple for MoE-style models.
        criterion (nn.Module): classification loss.
        data_loader (Iterable): yields (samples, targets) batches.
        optimizer (optim.Optimizer): optimizer over all model parameters.
        device (torch.device): device to train on.
        expert_loss_coeff (float): coefficient applied to the auxiliary
            expert losses returned by the model.
    Returns:
        tuple: (mean CE loss, mean expert loss) over the epoch.
    """
    # Switch to training mode.
    model.to(device)
    model.train()
    ce_losses = []
    expert_losses = []
    # Inner loop over batches.
    for samples, targets in data_loader:
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        # Mixed-precision forward pass (no GradScaler by design; see the
        # deprecation note at the top of the file).
        with torch.cuda.amp.autocast():
            outputs = model(samples)

            if isinstance(outputs, tuple):
                logits, aux_losses = outputs
                ce_loss = criterion(logits, targets)
                expert_loss = sum(aux_losses) * expert_loss_coeff
                loss = ce_loss + expert_loss
            else:
                ce_loss = criterion(outputs, targets)
                loss = ce_loss
                expert_loss = torch.tensor(0)
        # Abort on a non-finite loss.
        loss_value = loss.item()
        if not np.isfinite(loss_value):
            print(
                "Expert loss is {}, network loss is {}, stopping training".format(
                    expert_loss, ce_loss
                )
            )
            sys.exit(1)
        # Optimizer cleans up and steps.
        optimizer.zero_grad()
        # synchronize() raises on CPU-only builds, so only call it on CUDA.
        if device.type == "cuda":
            torch.cuda.synchronize()
        loss.backward()
        # Clip the gradient norm to stabilize training.
        nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()

        # Record the per-batch losses.
        ce_losses.append(ce_loss.item())
        expert_losses.append(expert_loss.item())

    return np.mean(ce_losses), np.mean(expert_losses)


@torch.no_grad()
def evalulate(model: nn.Module, data_loader: Iterable, device: torch.device):
    """Evaluate the model on the given data_loader.

    Args:
        model (nn.Module): model to evaluate; may return plain logits or a
            (logits, aux_losses) tuple.
        data_loader (Iterable): yields (images, targets) batches.
        device (torch.device): device to evaluate on.
    Returns:
        the mean test loss and a dict with mean top-1/top-5 accuracy.
    """
    # Switch to evaluation mode.
    model.to(device)
    model.eval()
    criterion = nn.CrossEntropyLoss()

    batch_losses = []
    top1_scores = []
    top5_scores = []
    # Main evaluation loop.
    for images, targets in data_loader:
        images = images.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        # Forward pass under mixed precision.
        with torch.cuda.amp.autocast():
            outputs = model(images)
            if isinstance(outputs, tuple):
                # MoE model: add the auxiliary expert losses to the CE loss.
                logits, aux_losses = outputs
                loss = criterion(logits, targets) + sum(aux_losses)
            else:
                logits = outputs
                loss = criterion(logits, targets)
        # Top-1 / top-5 accuracy for this batch.
        acc1, acc5 = accuracy(logits, targets, topk=(1, 5))
        batch_losses.append(loss.item())
        top1_scores.append(acc1.item())
        top5_scores.append(acc5.item())

    results: dict = {
        "acc_top5": np.mean(top5_scores),
        "acc_top1": np.mean(top1_scores),
    }
    return np.mean(batch_losses), results
