# -*- coding: utf-8 -*-
# @Time : 2020/11/2 12:09
# @Author : wudeyang
# @email :wudeyang@sjtu.edu.cn
# @Description:


# -*- coding: utf-8 -*-
# @Time    : 2019/8/23 21:58
# @Author  : zhoujun
import os
import cv2
import shutil
import numpy as np

import time
from tqdm import tqdm
import torch
import torchvision.utils as vutils
from torchvision import transforms

from utils import WarmupPolyLR
import torch.nn.functional as F

from . import BaseTrainer


class Trainer(BaseTrainer):
    """Training loop for a PANNet-style text detector.

    Extends ``BaseTrainer`` with a per-epoch training step, periodic
    step-based checkpointing (the dataset is too large to rely on
    epoch-end saves only), and TensorBoard logging of loss, learning
    rate and down-scaled image / ground-truth / prediction grids.
    """

    def __init__(self, config, model, criterion, train_loader, weights_init=None):
        """
        :param config: experiment config dict; reads ``trainer`` and
            ``lr_scheduler`` sections here (rest is handled by BaseTrainer).
        :param model: network to train.
        :param criterion: loss callable, invoked as ``criterion(gauss, preds, masks)``.
        :param train_loader: training DataLoader; must expose ``dataset_len``.
        :param weights_init: optional weight initializer forwarded to BaseTrainer.
        """
        super(Trainer, self).__init__(config, model, criterion, weights_init)
        self.show_images_interval = self.config['trainer']['show_images_interval']

        self.train_loader = train_loader
        self.train_loader_len = len(self.train_loader)

        if self.config['lr_scheduler']['type'] == 'WarmupPolyLR':
            warmup_iters = config['lr_scheduler']['args']['warmup_epoch'] * self.train_loader_len
            if self.start_epoch > 1:
                # Resuming: tell the scheduler how many iterations already ran
                # so the poly decay continues from the right point.
                self.config['lr_scheduler']['args']['last_epoch'] = (self.start_epoch - 1) * self.train_loader_len
            self.scheduler = WarmupPolyLR(self.optimizer, max_iters=self.epochs * self.train_loader_len,
                                          warmup_iters=warmup_iters, **config['lr_scheduler']['args'])

        self.logger.info('train dataset has {} samples,{} in dataloader'.format(self.train_loader.dataset_len,
                                                                                self.train_loader_len))

    def _train_epoch(self, epoch):
        """Run one training epoch.

        :param epoch: 1-based epoch index (used for logging only).
        :return: dict with keys ``train_loss`` (mean loss over the epoch),
            ``lr`` (last learning rate seen), ``time`` (epoch wall time in
            seconds) and ``epoch``.
        """
        self.model.train()
        epoch_start = time.time()
        batch_start = time.time()
        train_loss = 0.

        # Provide a default so step-based checkpointing below works even
        # before the first epoch has finished (epoch_result is normally set
        # by the base trainer at epoch end). Hoisted out of the loop instead
        # of a per-iteration bare try/except.
        if not hasattr(self, 'epoch_result'):
            self.epoch_result = {'train_loss': 0, 'lr': 0, 'time': 0,
                                 'epoch': 0}

        lr = self.optimizer.param_groups[0]['lr']
        for i, (images, masks, gauss) in enumerate(self.train_loader):

            # The dataset (~800k images) is too large to wait for epoch-end
            # saves, so also checkpoint every 1000 global steps.
            if self.global_step % 1000 == 0:
                net_save_path = '{}/PANNet_latest{}.pth'.format(self.checkpoint_dir, self.global_step)
                self._save_checkpoint(self.epoch_result['epoch'], net_save_path)

            lr = self.optimizer.param_groups[0]['lr']
            cur_batch_size = images.size()[0]
            images, masks, gauss = images.to(self.device), masks.to(self.device), gauss.to(
                self.device)
            preds = self.model(images)
            # Argument order expected by the criterion: (gauss_gt, prediction, mask).
            loss = self.criterion(gauss, preds, masks)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            loss = loss.item()
            # BUG FIX: accumulate the batch loss; previously train_loss stayed
            # 0 and the returned epoch mean was always 0.0.
            train_loss += loss
            if (i + 1) % self.display_interval == 0:
                batch_time = time.time() - batch_start
                self.logger.info(
                    '[{}/{}], [{}/{}], global_step: {}, Speed: {:.1f} samples/sec,   loss: {:.4f}, lr:{:.6}, time:{:.2f}'.format(
                        epoch, self.epochs, i + 1, self.train_loader_len, self.global_step,
                        self.display_interval * cur_batch_size / batch_time,  loss, lr, batch_time))
                batch_start = time.time()
            if self.tensorboard_enable:
                # Scalar summaries every step.
                self.writer.add_scalar('TRAIN/LOSS', loss, self.global_step)
                self.writer.add_scalar('TRAIN/lr', lr, self.global_step)
                if i % self.show_images_interval == 0:
                    # Downscale by 4x before logging so the image grids stay
                    # small and readable in TensorBoard.
                    images = F.interpolate(images, size=list(np.array(images.size()[2:]) // 4),
                                           mode='bilinear', align_corners=False)
                    self.writer.add_images('TRAIN/imgs', images, self.global_step)

                    # Ground-truth text mask (cast to float for interpolation).
                    show_label = masks.unsqueeze(1)
                    show_label = F.interpolate(show_label.float(), size=list(np.array(show_label.size()[2:]) // 4),
                                               mode='bilinear', align_corners=False)
                    show_label = vutils.make_grid(show_label, nrow=cur_batch_size // 8, normalize=False,
                                                  padding=20,
                                                  pad_value=1)
                    self.writer.add_image('TRAIN/gt_mask', show_label, self.global_step)

                    # Ground-truth gaussian heatmap.
                    show_label = gauss.unsqueeze(1)
                    show_label = F.interpolate(show_label, size=list(np.array(show_label.size()[2:]) // 4),
                                               mode='bilinear', align_corners=False)
                    show_label = vutils.make_grid(show_label, nrow=cur_batch_size // 8, normalize=False,
                                                  padding=20,
                                                  pad_value=1)
                    self.writer.add_image('TRAIN/gt_gauss', show_label, self.global_step)

                    # Model output; preds[0] is assumed (batch, H, W, C) here
                    # and channel 0 is visualized — TODO confirm layout.
                    show_pred = preds[0][:, :, :, 0].unsqueeze(1)
                    show_pred = F.interpolate(show_pred, size=list(np.array(show_pred.size()[2:]) // 2),
                                              mode='bilinear', align_corners=False)
                    show_pred = vutils.make_grid(show_pred, nrow=cur_batch_size // 8, normalize=False,
                                                 padding=20,
                                                 pad_value=1)
                    self.writer.add_image('TRAIN/preds', show_pred, self.global_step)
            self.global_step += 1

        return {'train_loss': train_loss / self.train_loader_len, 'lr': lr, 'time': time.time() - epoch_start,
                'epoch': epoch}

    def _eval(self):
        """Validation step — not implemented yet (no validation split)."""
        pass

    def _on_epoch_finish(self):
        # Pretraining setup adapted from PAN; no validation split yet, so only
        # the training metrics are logged here.
        self.logger.info('[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
            self.epoch_result['epoch'], self.epochs, self.epoch_result['train_loss'], self.epoch_result['time'],
            self.epoch_result['lr']))

    def _on_train_finish(self):
        """Log final metrics collected by the base trainer."""
        for k, v in self.metrics.items():
            self.logger.info('{}:{}'.format(k, v))
        self.logger.info('finish train')
