## This version recovers the train set and the validation set, respectively,
## and runs prediction once per several training passes.
## Inputs are normalized in this version.
## 2020.08.18

import argparse
import logging
import os
import sys
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from torchsummary import summary

from eval import eval_net
# from unet_S import UNet
from models.pp_unet import UNet
# from torch.utils.tensorboard import SummaryWriter
from utils.dataset import BasicDataset
from torch.utils.data import DataLoader, random_split
from utils.parsers import parse_cfg
from argparse import Namespace


def train_unet(cfgs, device):
    """Train a U-Net segmentation model on the configured dataset.

    Args:
        cfgs: dict of base settings; must contain ``"cfgpath"`` pointing to a
            cfg file whose parsed key/values are merged in via ``parse_cfg``.
            Expected keys after the merge include: train_dir, mask_dir,
            img_scale, val_percent, batch_size, pre_train, inchannel,
            classnum, lr, epochs, weight_out.
        device: torch.device to run training on.

    Side effects:
        Saves the full model (``<epoch>_net.pt``) and its state dict
        (``<epoch>_net.pth``) under ``cfgs.weight_out`` every other epoch,
        and prints validation accuracy.
    """
    cfgs.update(parse_cfg(cfgs["cfgpath"]))
    cfgs = Namespace(**cfgs)

    # Split the dataset into train/validation subsets.
    dataset = BasicDataset(cfgs.train_dir, cfgs.mask_dir, cfgs.img_scale)
    n_val = int(len(dataset) * cfgs.val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train, batch_size=cfgs.batch_size, shuffle=True,
                              num_workers=2, pin_memory=True)
    val_loader = DataLoader(val, batch_size=cfgs.batch_size, shuffle=False,
                            num_workers=2, pin_memory=True, drop_last=True)

    if cfgs.pre_train:
        # NOTE(review): hard-coded checkpoint path — consider moving into the cfg file.
        net = torch.load('G:/train_data/model_ppunet1/96_net.pt')
        summary(net, (3, 512, 512))
    else:
        net = UNet(in_channels=cfgs.inchannel, out_channels=cfgs.classnum)
    net = net.to(device)

    global_step = 0

    optimizer = optim.RMSprop(net.parameters(), lr=cfgs.lr,
                              weight_decay=1e-8, momentum=0.9)
    # FIX: the original mode expression `'min' if 4 > 1 else 'max'` always
    # evaluated to 'min'; made that explicit.
    # NOTE(review): this scheduler is never stepped anywhere in the loop —
    # call scheduler.step(metric) after validation to actually use it.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2)

    # Per-class weights: background, beach, mudflat, mangrove, island body, water.
    weights = [1, 26, 10, 50, 68, 1]
    # FIX: was `.cuda()`, which ignored `device` and crashed on CPU-only
    # machines; use the device actually passed in.
    class_weights = torch.FloatTensor(weights).to(device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    for epoch in range(cfgs.epochs):
        net.train()

        epoch_loss = 0
        epoch_corr = 0
        cnt = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{cfgs.epochs}', unit='img') as pbar:
            for batch in train_loader:
                cnt += 1
                imgs = batch['image'].to(device=device, dtype=torch.float32)
                # Masks are stored scaled into [0, 1]; multiply by 255 to
                # recover integer class ids before the long cast.
                true_masks = (batch['mask'] * 255).to(device=device, dtype=torch.long)

                masks_pred = net(imgs)

                # Drop the channel dim so shape matches CrossEntropyLoss targets.
                true_masks = true_masks.squeeze(1)
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()

                # Per-pixel accuracy: index of the max logit per pixel.
                pred = masks_pred.max(1, keepdim=True)[1]
                epoch_corr += pred.eq(true_masks.view_as(pred)).sum().item()

                # NOTE(review): assumes 512x512 inputs and full batches —
                # the last (possibly smaller) batch skews this slightly.
                avg_acc = epoch_corr / (cnt * cfgs.batch_size * 512 * 512)
                stats = ('batch loss', round(loss.item(), 2),
                         'total loss', round(epoch_loss, 2),
                         'avg train acc%', round(avg_acc * 100, 2))
                pbar.set_postfix({'results': stats})

                optimizer.zero_grad()
                loss.backward()
                # Clip gradients element-wise to stabilize training.
                nn.utils.clip_grad_value_(net.parameters(), 0.1)
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1

            # Checkpoint and validate every other epoch.
            if epoch % 2 == 0:
                torch.save(net, cfgs.weight_out + str(epoch) + '_net.pt')
                torch.save(net.state_dict(), cfgs.weight_out + str(epoch) + '_net.pth')
                val_acc = eval_net(net, val_loader, device, cfgs.batch_size)
                print('val acc%:', round(val_acc * 100, 2))

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    # Prefer GPU when available; train_unet honors this device throughout.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f'Using device {device}')

    try:
        # FIX: train_unet requires (cfgs, device); the device argument was
        # missing, so this call previously raised TypeError.
        train_unet(parse_cfg("F:/detect_change/cfg/unet/train_unet.cfg"), device)
    except KeyboardInterrupt:
        logging.info('Training interrupted by user')
        sys.exit(0)