from tqdm import tqdm
from functools import reduce
from utils import get_layers

import torch
import torch.nn as nn
import torch.nn.functional as F
import utils


def train_iter(model, train_loader, optimizer, epoch, loss_coef=1, penalty=None):
    """Run one training epoch and return the epoch's average loss.

    Args:
        model: network to train; batches are moved to CUDA here, so the
            model is assumed to already live on the GPU.
        train_loader: iterable yielding ``(input, label)`` batches.
        optimizer: optimizer over ``model``'s parameters.
        epoch: epoch index, used only in the progress-log header.
        loss_coef: scalar multiplier applied to the cross-entropy loss
            before the optional penalty is added.
        penalty: optional callable ``penalty(model, input, output, label)``
            returning a scalar tensor added to the loss.

    Returns:
        The global average of the per-batch loss values, as tracked by
        the metric logger.

    Raises:
        RuntimeError: if the loss or the penalty evaluates to NaN.
    """
    model.train()

    logger = utils.MetricLogger(delimiter="  ")
    header = 'Epoch: [{}]'.format(epoch)

    for inputs, labels in logger.log_every(train_loader, 1000, header):
        inputs = inputs.cuda()
        labels = labels.cuda()

        optimizer.zero_grad(set_to_none=True)

        outputs = model(inputs)
        loss = F.cross_entropy(outputs, labels)
        # Raise instead of assert so the check survives `python -O`.
        if loss.isnan():
            raise RuntimeError("loss is nan")

        loss *= loss_coef

        if penalty is not None:
            _penalty = penalty(model, inputs, outputs, labels)
            if _penalty.isnan():
                raise RuntimeError("penalty is nan")

            loss += _penalty

        loss.backward()
        optimizer.step()
        logger.update(loss=loss.item())

    return logger.meters['loss'].global_avg



@torch.no_grad()
def test_iter(model, test_loader, topk=1, need_loss=False):
    """Evaluate `model` on `test_loader` and return top-k accuracy.

    Args:
        model: network to evaluate; batches are moved to CUDA here.
        test_loader: iterable yielding ``(input, label)`` batches.
        topk: a single k (int) or a list of k values. With an int, a
            single accuracy float is returned; with a list, a list of
            accuracies (one per k, in the same order).
        need_loss: if True, also return the mean cross-entropy loss per
            sample as a Python float.

    Returns:
        ``accuracy`` or ``(accuracy, mean_loss)`` depending on `need_loss`.
    """
    model.eval()
    topk_list = [topk] if isinstance(topk, int) else topk
    maxk = max(topk_list)
    correct = [0] * maxk
    total = 0
    loss_sum = 0.0
    for inputs, labels in tqdm(test_loader):
        inputs = inputs.cuda()
        labels = labels.cuda()

        output = model(inputs)

        if need_loss:
            # Accumulate the per-sample SUM, not the per-batch mean:
            # summing batch means and dividing by the sample count (as the
            # previous version did) understates the loss by ~batch_size and
            # mis-weights a smaller final batch.
            loss_sum += F.cross_entropy(output, labels, reduction='sum').item()

        # Top-maxk predictions, transposed to (maxk, batch) so a [:k] slice
        # selects the top-k rows for each requested k.
        _, predicted = output.topk(maxk, 1, True, True)
        predicted = predicted.t()
        correct_mat = predicted.eq(labels.view(1, -1).expand_as(predicted))
        for k in topk_list:
            correct[k - 1] += correct_mat[:k].sum().item()
        total += labels.size(0)
    acc_list = [correct[k - 1] / total for k in topk_list]
    accuracy = acc_list[0] if isinstance(topk, int) else acc_list
    # Only divide when the loss was requested; this also avoids a 0/0
    # ZeroDivisionError on an empty loader when need_loss is False.
    if need_loss:
        return accuracy, loss_sum / total
    return accuracy


def distillation(teacher, student, train_loader,
                      layer_groups, idx, lr=0.01, penalty=None, epochs=5):
    """Distill one layer group of `teacher` into the matching group of `student`.

    The teacher's layer groups before `idx` produce the (frozen) input
    features; the student's group at `idx` is trained with SGD to match
    the teacher group's output on those features via MSE.

    Args:
        teacher: frozen reference model (its parameters get
            ``requires_grad = False`` as a side effect).
        student: model whose group at `idx` is trained in place.
        train_loader: iterable yielding ``(input, _)`` batches; labels
            are ignored.
        layer_groups: sequence of layer-group specs understood by
            ``get_layers``.
        idx: index of the layer group to distill.
        lr: SGD learning rate.
        penalty: optional callable
            ``penalty(student_layer, input, teacher_output)`` returning a
            scalar tensor added to the MSE loss.
        epochs: number of passes over `train_loader`.
    """
    teacher.eval()
    student.train()

    # Freeze the teacher so no gradients flow into it.
    for p in teacher.parameters():
        p.requires_grad = False

    # All teacher layers before group `idx`, flattened into one Sequential;
    # their output is the shared input fed to both layer groups below.
    prev_layers = [
        layer
        for i in range(idx)
        for layer in get_layers(teacher, layer_groups[i])
    ]
    teacher_prevlayers = nn.Sequential(*prev_layers)

    teacher_layer = nn.Sequential(*get_layers(teacher, layer_groups[idx]))
    student_layer = nn.Sequential(*get_layers(student, layer_groups[idx]))

    optimizer = torch.optim.SGD(
        student_layer.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)

    for _ in tqdm(range(epochs)):
        for inputs, _ in train_loader:
            inputs = inputs.cuda()
            optimizer.zero_grad(set_to_none=True)

            with torch.no_grad():
                # Run the frozen prefix once; both teacher and student
                # groups consume the same detached features.
                inputs = teacher_prevlayers(inputs)
                teacher_output = teacher_layer(inputs)

            student_output = student_layer(inputs)
            loss = F.mse_loss(student_output, teacher_output, reduction='mean')
            if penalty is not None:
                loss += penalty(student_layer, inputs, teacher_output)
            loss.backward()
            optimizer.step()
