import jittor
import numpy
from tqdm import tqdm
from ljp.cell import SubtractorConv2D, SubtractorLinear


def training(model, dataloader, optimizer=jittor.nn.Adan, loss_func=jittor.nn.cross_entropy_loss, desc=''):
    """Run one training pass of ``model`` over ``dataloader``.

    Args:
        model: jittor module to train; switched to train mode here.
        dataloader: iterable yielding ``(inputs, targets)`` batches.
        optimizer: optimizer whose ``step(loss)`` performs the update.
            NOTE(review): the default is the ``Adan`` class, not an
            instance — callers are expected to pass a constructed
            optimizer; confirm against call sites.
        loss_func: callable mapping ``(outputs, targets)`` to a loss.
        desc: suffix appended to the progress-bar description.
    """
    model.train()
    progress = tqdm(
        dataloader,
        desc=f'Training {desc}',
        ncols=200,
        leave=True,
        unit='batch',
        disable=False,
    )
    for batch_inputs, batch_targets in progress:
        predictions = model(batch_inputs)
        loss = loss_func(predictions, batch_targets)
        optimizer.step(loss)


def training_sub(model, dataloader, optimizer=jittor.nn.Adan, loss_func=jittor.nn.cross_entropy_loss, alpha=0.01,
                 desc=''):
    """Run one training pass with an added subtractor-regularization penalty.

    The loss is ``loss_func(outputs, targets) + alpha * mean(regterm)`` where
    the mean runs over the ``regterm`` attribute of every ``SubtractorConv2D``
    / ``SubtractorLinear`` module in ``model``.

    Args:
        model: jittor module to train; switched to train mode here.
        dataloader: iterable yielding ``(inputs, targets)`` batches.
        optimizer: optimizer whose ``step(loss)`` performs the update.
        loss_func: callable mapping ``(outputs, targets)`` to a loss.
        alpha: weight of the averaged regularization term.
        desc: suffix appended to the progress-bar description.
    """
    model.train()
    progress = tqdm(
        dataloader,
        desc=f'Training {desc}',
        ncols=200,
        leave=True,
        unit='batch',
        disable=False,
    )
    for inputs, targets in progress:
        outputs = model(inputs)
        # BUG FIX: the original accumulated regterms into an int
        # (``regterms += module.regterm``) and then called
        # ``sum(regterms) / len(regterms)`` on that scalar, which raises
        # TypeError. Collect the terms in a list so sum/len are valid.
        regterms = [module.regterm
                    for _name, module in model.named_modules()
                    if isinstance(module, (SubtractorConv2D, SubtractorLinear))]
        batch_loss = loss_func(outputs, targets)
        if regterms:  # guard: models without subtractor layers get plain loss
            batch_loss = batch_loss + alpha * sum(regterms) / len(regterms)
        optimizer.step(batch_loss)


@jittor.no_grad()
def testing(model, dataloader, desc=''):
    """Run inference over ``dataloader`` with gradients disabled.

    Args:
        model: jittor module to evaluate; switched to eval mode here.
        dataloader: iterable yielding ``(inputs, targets)`` batches.
        desc: suffix appended to the progress-bar description.

    Returns:
        Tuple ``(predictions, ground_truth)`` — two numpy arrays built by
        concatenating per-batch model outputs and targets along axis 0.
    """
    model.eval()
    progress = tqdm(
        dataloader,
        desc=f'Testing {desc}',
        ncols=200,
        leave=True,
        unit='batch',
        disable=False,
    )
    all_outputs, all_targets = [], []
    for batch_inputs, batch_targets in progress:
        batch_outputs = model(batch_inputs)
        all_outputs.append(batch_outputs.numpy())
        all_targets.append(batch_targets.numpy())
    predictions = numpy.concatenate(all_outputs, axis=0)
    ground_truth = numpy.concatenate(all_targets, axis=0)
    return predictions, ground_truth
