import os

import torch
from tqdm import tqdm

from code.utils.utils import get_lr

# Training with Contrastive Loss; triplet loss will be integrated later.
def train_one_epoch(model_train, model, loss, optimizer, epoch, epoch_step, gen, Epoch,
                    cuda, Batch_size, fp16, scaler, save_period, save_dir, local_rank):
    """
    parameters:
        gen:         自己的数据集中的训练集
        gen_val:     自己的数据集中的验证集
        Epoch:       总的训练轮数
        Batch_size:  目前是48
    """
    # 总损失初始化
    total_contrastive_loss = 0

    if local_rank == 0:
        print('Start Train')
        pbar = tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3)
    model_train.train()

    for iteration, batch in enumerate(gen):
        if iteration >= epoch_step:
            break
        imageA, imageB, label = batch
        with torch.no_grad():
            if cuda:
                images = images.cuda(local_rank)
                label = label.cuda(local_rank)

        optimizer.zero_grad()
        if not fp16:
            # 预训练的模型返回的输出
            output1 = model_train(imageA)
            output2 = model_train(imageB)
            # 计算损失
            _contrastive_loss = loss(output1, output2, label)
            _contrastive_loss.backward()   # 向后传播
            optimizer.step()
        else:
            from torch.cuda.amp import autocast
            with autocast():
                output = model_train(images)

                _contrastive_loss = loss(output, Batch_size)
            # ----------------------#
            #   反向传播
            # ----------------------#
            scaler.scale(_contrastive_loss).backward()
            scaler.step(optimizer)
            scaler.update()

        total_contrastive_loss += _contrastive_loss.item()

        if local_rank == 0:
            pbar.set_postfix(**{'total_contrastive_loss': total_contrastive_loss / (iteration + 1),
                                'lr': get_lr(optimizer)})
            pbar.update(1)

    if local_rank == 0:
        pbar.close()
        print('Finish Train')
        model_train.eval()

        # 保存权重
        print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
        print('Total Loss: %.4f' % (total_contrastive_loss/ epoch_step))
        if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
            torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.6f.pth'%((epoch + 1), total_contrastive_loss / epoch_step)))

