import jittor
from tqdm import tqdm
from ljp.cell import SubtractorConv2D, SubtractorLinear
from ljp.metrics import correct_num


def train(model, train_loader, optimizer, desc=''):
    """Run one training epoch with a tqdm progress bar.

    Args:
        model: network to train; switched into training mode here.
        train_loader: iterable yielding ``(inputs, targets)`` batches.
        optimizer: jittor optimizer; ``step(loss)`` backprops and updates.
        desc: extra text appended to the progress-bar description.
    """
    model.train()
    losses = []
    tqdm_iterator = tqdm(train_loader,
                         desc=f'train {desc}',
                         ncols=200,
                         leave=True,
                         unit='batch',
                         disable=False)
    for batch_idx, (inputs, targets) in enumerate(tqdm_iterator):
        outputs = model(inputs)
        loss = jittor.nn.cross_entropy_loss(outputs, targets)
        # jittor's optimizer.step takes the loss directly (backward + update).
        optimizer.step(loss)
        # Pull the scalar out once; reuse it for logging below.
        losses.append(loss.numpy()[0])
        if batch_idx % 10 == 0:
            print('Train : [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx, len(train_loader),
                100. * batch_idx / len(train_loader),
                losses[-1]))
    # Epoch summary; guard against an empty loader to avoid ZeroDivisionError.
    if losses:
        print(f'Train loss = {sum(losses) / len(losses)}')


def train1(model, train_loader, optimizer, desc=''):
    """Run one training epoch without a progress bar, logging every 10 batches.

    Args:
        model: network to train; switched into training mode here.
        train_loader: iterable yielding ``(inputs, targets)`` batches.
        optimizer: jittor optimizer; ``step(loss)`` backprops and updates.
        desc: unused here; kept for signature parity with ``train``.
    """
    model.train()
    # Hoist the loader length once instead of recomputing it per log line.
    num_batches = len(train_loader)
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        outputs = model(inputs)
        loss = jittor.nn.cross_entropy_loss(outputs, targets)
        optimizer.step(loss)

        if batch_idx % 10 == 0:
            print('Train : [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx, num_batches,
                100. * batch_idx / num_batches,
                loss.numpy()[0]))


def train_sub(model, train_loader, optimizer, desc='', alpha=1):
    """Train only on the subtractor regularization term.

    For every ``SubtractorConv2D`` / ``SubtractorLinear`` module in the model,
    averages ``module.regterm`` (populated by the forward pass), scales it by
    ``alpha``, and steps the optimizer on that value. Note the cross-entropy
    term is intentionally NOT part of the loss here.

    Args:
        model: network containing subtractor modules; switched to train mode.
        train_loader: iterable yielding ``(inputs, targets)`` batches.
        optimizer: jittor optimizer; ``step(loss)`` backprops and updates.
        desc: extra text appended to the progress-bar description.
        alpha: weight applied to the averaged regularization term.
    """
    model.train()
    tqdm_iterator = tqdm(train_loader,
                         desc=f'train {desc}',
                         ncols=200,
                         leave=True,
                         unit='batch',
                         disable=False)
    for batch_idx, (inputs, targets) in enumerate(tqdm_iterator):
        # The forward pass is what fills in each module's `regterm`.
        outputs = model(inputs)

        reg_sum, reg_count = 0, 0
        for name, module in model.named_modules():
            if isinstance(module, (SubtractorConv2D, SubtractorLinear)):
                reg_sum += module.regterm
                reg_count += 1
        if reg_count == 0:
            # No subtractor modules: nothing to regularize this step;
            # skip instead of dividing by zero.
            continue
        loss = alpha * reg_sum / reg_count
        optimizer.step(loss)


# @jittor.no_grad()
def val(model, val_loader):
    """Evaluate ``model`` on ``val_loader`` and print top-1 accuracy.

    Args:
        model: network to evaluate; switched into eval mode here.
        val_loader: iterable yielding ``(inputs, targets)`` batches.
    """
    model.eval()
    total_acc = 0
    total_num = 0
    # Evaluation needs no gradients; disabling them saves memory/compute.
    # (The original had `@jittor.no_grad()` commented out above the def.)
    with jittor.no_grad():
        for inputs, targets in val_loader:
            outputs = model(inputs)
            total_acc += correct_num(outputs, targets)
            total_num += inputs.shape[0]
    # Guard against an empty loader to avoid ZeroDivisionError.
    if total_num:
        print(f'Test Acc = {100 * total_acc / total_num} %')
