#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@author:hengk
@contact: hengk@foxmail.com
@datetime:2019-10-30 14:41
"""

import os
import datetime
import torch.optim as optim
import torch

from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter

from backbones import BackBoneFactory
from losses import LossFactory
from datasets.chartsets import ChartSets,ChartsCollate
from utils import load_config,WarmUpLR

def eval_training(epoch):
    """Evaluate the network on the test set; print and log loss/accuracy.

    Uses module-level globals ``net``, ``test_loader``, ``loss`` and
    ``writer``. Assumes a 10-label multi-label head: outputs are passed
    through sigmoid and thresholded at 0.5.
    NOTE(review): assumes `labels` is a float/long tensor of shape
    (batch, 10) matching the network output — confirm against ChartSets.

    :param epoch: current epoch index, used as the TensorBoard x-axis.
    """
    net.eval()

    # One correct-prediction counter per class. The original used shape
    # (1, 10), which made correct_count[1]..[9] raise IndexError below and
    # correct_count[0] a whole row instead of a scalar; a flat (10,) vector
    # fixes both.
    correct_count = torch.zeros(10).cuda()
    test_loss = 0
    with torch.no_grad():  # no autograd graph needed during evaluation
        for (images, labels) in test_loader:
            images = images.cuda()
            labels = labels.cuda()

            outputs = net(images)
            outputs = torch.sigmoid(outputs)

            los = loss(outputs, labels)
            test_loss = test_loss + los.item()

            # Binarize predictions at 0.5 and count per-class hits.
            outputs[outputs > 0.5] = 1
            outputs[outputs <= 0.5] = 0
            ret = (outputs.long() == labels)
            correct_count = correct_count + torch.sum(ret, dim=0)

    denom = len(test_loader.dataset)
    # Per-class accuracies as plain Python floats so str.format can apply
    # the {:.4f} spec (the original passed tensor rows, which cannot be
    # formatted and would have been wrong anyway).
    per_class = (correct_count / denom).tolist()
    print('Test set: Average loss: {:.4f}, Accuracy: {:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f},{:.4f}'.format(
        test_loss / denom, *per_class
    ))

    writer.add_scalar('Test/Average loss', test_loss / denom, epoch)
    # Overall accuracy = total correct over (10 labels * dataset size).
    # The original called torch.sum(..., dim=2) on a 2-D tensor, which raises.
    writer.add_scalar('Test/Accuracy', correct_count.sum().item() / (10 * denom), epoch)

def train(epoch):
    """Run one training epoch over ``train_loader``.

    Uses module-level globals ``net``, ``train_loader``, ``loss``, ``opt``,
    ``writer``, ``train_param`` and ``warm_up_scheduler``. Logs the per-batch
    loss to TensorBoard under 'Train/loss'.

    :param epoch: 1-based epoch index (used for warm-up gating and the
        global iteration counter).
    """
    net.train()
    for batch_index, (images, labels) in enumerate(train_loader):
        # torch.autograd.Variable is deprecated (a no-op since torch 0.4);
        # tensors carry autograd state directly.
        labels = labels.cuda()
        images = images.cuda()

        opt.zero_grad()
        outputs = net(images)
        los = loss(outputs, labels)
        los.backward()
        opt.step()

        # During warm-up epochs the LR is ramped up per iteration.
        if epoch <= train_param.warm:
            warm_up_scheduler.step()
        n_iter = (epoch - 1) * len(train_loader) + batch_index + 1
        writer.add_scalar('Train/loss', los.item(), n_iter)
        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.10f}\tLR: {:0.10f}'.format(
            los.item(),
            opt.param_groups[0]['lr'],
            epoch=epoch,
            # Count the just-finished batch too; the original reported 0
            # samples on the first batch (off by one batch).
            trained_samples=(batch_index + 1) * train_param.batchsize,
            total_samples=len(train_loader.dataset)
        ))




if __name__ == '__main__':

    cfg = load_config("config/config.yaml")
    train_param = cfg.train
    test_param = cfg.test

    # CUDA_VISIBLE_DEVICES only takes effect if set BEFORE the first CUDA
    # context is created; the original set it after the backbone was built,
    # which can silently ignore the GPU selection.
    if train_param.gpu != "":
        os.environ["CUDA_VISIBLE_DEVICES"] = train_param.gpu

    net = BackBoneFactory.create(train_param.backbone, train_param.pretrain, train_param.num_class)
    if train_param.gpu != "":
        net.cuda()

    # Optionally resume from a saved state_dict.
    if train_param.resume_path != "":
        checkpoint = torch.load(train_param.resume_path)
        net.load_state_dict(checkpoint)

    # One TensorBoard run directory per launch, stamped with the start time.
    writer = SummaryWriter(log_dir=os.path.join(
        train_param.log_dir, train_param.backbone, str(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))))

    loss = LossFactory.create(train_param.loss)
    opt = optim.Adam(net.parameters(), lr=train_param.lr, betas=(0.9, 0.99))
    collate = ChartsCollate()

    train_set = ChartSets(train_param.train_images, train_param.train_labels, train_param.long_size, True)
    train_loader = DataLoader(
        train_set,
        batch_size=train_param.batchsize,
        collate_fn=collate,
        shuffle=True,
        num_workers=3,
        drop_last=True,
        pin_memory=True)

    test_set = ChartSets(test_param.test_images, test_param.test_labels, test_param.long_size, True)
    # NOTE(review): shuffle=True and drop_last=True on the TEST loader skip
    # up to batchsize-1 samples and randomize order — confirm this is
    # intentional (usually both are False for evaluation).
    test_loader = DataLoader(
        test_set,
        batch_size=test_param.batchsize,
        collate_fn=collate,
        shuffle=True,
        num_workers=3,
        drop_last=True,
        pin_memory=True)

    # LR schedule for the main training phase (decay by 10x at milestones).
    train_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=train_param.milestones, gamma=0.1)
    # Per-iteration LR ramp for the warm-up phase.
    warm_up_scheduler = WarmUpLR(opt, len(train_loader) * train_param.warm)

    # Ensure the checkpoint directory exists before the first torch.save.
    os.makedirs(train_param.checkpoints, exist_ok=True)
    checkpoint_path = os.path.join(train_param.checkpoints, '{net}-{epoch}.pth')

    for epoch in range(1, train_param.epoch + 1):
        if epoch > train_param.warm:
            # Passing the epoch explicitly keeps the milestone schedule
            # aligned despite the warm-up epochs; note step(epoch) is
            # deprecated in newer torch versions.
            train_scheduler.step(epoch)
        train(epoch)

        # eval_training(epoch)
        torch.save(net.state_dict(), checkpoint_path.format(net=train_param.backbone, epoch=epoch))
