import os
import torch
from tqdm import tqdm

# Training with Contrastive Loss; triplet loss will be integrated here later.
from src.utils import get_lr


def train_one_epoch(model_train, model, loss, optimizer, epoch_step, epoch, gen, Epoch, save_period, save_dir, cuda):
    """
    Run one training epoch with contrastive loss and periodically save weights.

    Parameters:
        model_train: model in training mode (possibly a parallel/wrapped view of `model`)
        model:       underlying model whose state_dict is written to disk
        loss:        contrastive loss callable: loss(output, labels) -> scalar tensor
        optimizer:   optimizer updating model_train's parameters
        epoch_step:  number of batches to run this epoch
        epoch:       current zero-based epoch index
        gen:         training data loader yielding (images, labels) batches
        Epoch:       total number of training epochs
        save_period: save a checkpoint every `save_period` epochs
        save_dir:    directory for checkpoint files
        cuda:        move each batch to the GPU when True
    """
    # Running sum of per-batch loss values for this epoch.
    total_contrastive_loss = 0
    print('Start Train')
    pbar = tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3)
    model_train.train()

    for iteration, batch in enumerate(gen):
        # BUG FIX: was `iteration > epoch_step`, which processed one extra batch
        # beyond the progress-bar total and the divisor used for the average.
        if iteration >= epoch_step:
            break

        images, labels = batch
        # Only the host-to-device copy belongs under no_grad; the training
        # step below must run WITH autograd enabled.
        if cuda:
            with torch.no_grad():
                images = images.cuda()
                labels = labels.cuda()

        optimizer.zero_grad()
        # Forward pass.
        output = model_train(images)
        _contrastive_loss = loss(output, labels)
        # BUG FIX: the original wrapped this whole step in `torch.no_grad()` and
        # forced `requires_grad_(True)` on the detached loss; backward() then
        # produced no gradients for the model parameters, making
        # optimizer.step() a no-op. Running the step outside no_grad (and
        # dropping the requires_grad_ hack) lets the model actually train.
        _contrastive_loss.backward()
        optimizer.step()

        total_contrastive_loss += _contrastive_loss.item()

        pbar.set_postfix(**{'total_contrastive_loss': total_contrastive_loss / (iteration + 1),
                            'lr': get_lr(optimizer)})
        pbar.update(1)

    pbar.close()
    print('Finish Train')
    model_train.eval()

    # Report epoch stats and save weights every `save_period` epochs,
    # plus always at the final epoch.
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.4f' % (total_contrastive_loss / epoch_step))
    if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
        torch.save(model.state_dict(),
                   os.path.join(save_dir, 'ep%03d-loss%.6f.pth' % ((epoch + 1), total_contrastive_loss / epoch_step)))

