#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@author:hengk
@contact: hengk@foxmail.com
@datetime:2020-05-01 20:28
"""
from torch.optim.lr_scheduler import _LRScheduler
from backbone.mobilenetV3 import MobileNetV3
from loss.tripletloss import TripletLoss
from dataloader import DataLoader
from easydict import EasyDict
import torch.optim as optim
import yaml
import torch
import os
import time

class WarmUpLR(_LRScheduler):
    """Linear learning-rate warm-up scheduler.

    For the first ``total_iters`` optimizer steps, the learning rate of
    every parameter group ramps linearly from ~0 up to its base value:
    ``lr = base_lr * step / total_iters``.

    Args:
        optimizer: the wrapped optimizer (e.g. SGD, Adam).
        total_iters: total number of warm-up iterations.
        last_epoch: index of the last step; -1 starts a fresh schedule.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return each group's base lr scaled by the warm-up progress.

        The tiny epsilon in the denominator keeps the division safe
        when ``total_iters`` is 0.
        """
        scale = self.last_epoch / (self.total_iters + 1e-8)
        return [scale * base_lr for base_lr in self.base_lrs]
if __name__ == '__main__':

    # Load the training configuration (closed automatically via `with`;
    # the original left the file handle open).
    with open("config/default.yaml", 'r', encoding='utf-8') as f:
        cfg = yaml.load(f.read(), Loader=yaml.FullLoader)
    cfg = EasyDict(cfg)
    epoch_nums = cfg.train.epoch
    batch_size = cfg.train.batch_size
    pretrain = cfg.train.pretrain
    lr = cfg.train.lr
    warm = cfg.train.warm
    milestones = cfg.train.milestone

    # Load the dataset.
    dl = DataLoader(cfg)
    total_nums = dl.get_len()

    # Build the network and move it to the configured GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.train.gpu
    net = MobileNetV3()
    net = net.cuda()

    opt = optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.99))
    tripletloss = TripletLoss()

    # LR schedule: linear warm-up for the first `warm` epochs
    # (dl.get_len()*warm iterations), then step decay at the milestones.
    train_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=milestones, gamma=0.1)
    warm_up_scheduler = WarmUpLR(opt, dl.get_len() * warm)

    if pretrain != "":
        print("load pretrain model")
        checkpoint = torch.load(pretrain)
        net.load_state_dict(checkpoint)

    # Make sure the checkpoint directory exists before training starts,
    # so torch.save below cannot fail with FileNotFoundError.
    os.makedirs("model", exist_ok=True)

    iterations = total_nums // batch_size  # batches per epoch (loop-invariant)
    for epoch in range(epoch_nums):
        for j in range(iterations):
            # BUG FIX: epochs are 0-based, so warm-up covers epochs
            # [0, warm); the original `epoch > warm` kept warming up one
            # epoch past the scheduled total_iters of WarmUpLR.
            if epoch >= warm:
                train_scheduler.step()
            else:
                warm_up_scheduler.step()
            images, labels = dl.get_next()
            images = images.cuda()
            labels = labels.cuda()
            output = net(images)
            loss = tripletloss(output, labels)
            # BUG FIX: gradients must be cleared every step; without
            # zero_grad() they accumulate across all iterations.
            opt.zero_grad()
            loss.backward()
            opt.step()
            print("%d / %d loss:%f" % (epoch, j, loss))

        # Save a checkpoint at the end of every epoch.
        checkpoint_path = os.path.join("model", '{epoch}.pth')
        torch.save(net.state_dict(), checkpoint_path.format(epoch=epoch))
