# -*- coding: utf-8 -*-
"""
@project: Unet
@author: Wu Yue
@file: train
@ide: PyCharm
@creat time: 2020-10-05 20:59
@change time:
@function: 
"""
import time, os

import numpy as np
import cv2 as cv

import torch
from torch.utils.data import Dataset, DataLoader

from network.unet import Unet
from utils import *

import  matplotlib.pyplot as plt



""" >>> 参数设置 >>> """

trainDataDir = "./reSplitData/train/data/"
trainLabelDir = "./reSplitData/train/Label/"
testDataDir = "./reSplitData/test/data/"
testLabelDir = "./reSplitData/test/Label/"

epoches = 60
batchsize = 4

validRatio = 0.05  # 验证集比例

""" <<< 参数设置 <<< """



""" >>> 构造加载入模型所用的数据加载器 >>> """

class InputDataSet(Dataset):
    """Map-style Dataset yielding (image, label) pairs loaded lazily by path.

    Samples are read from disk on access, not up front: only the file paths
    are stored at construction time.
    """

    def __init__(self, data, loader=myLoader):
        """
        :param data: trainSet or testSet — an object with "data" and "label"
                     columns (e.g. a DataFrame) holding image/mask file paths
        :param loader: callable mapping a file path to a loaded sample
        """
        self.XPaths = data["data"].values
        self.YPaths = data["label"].values
        self.loader = loader

    def __getitem__(self, index):
        # Load the image and its matching mask on demand.
        load = self.loader
        return load(self.XPaths[index]), load(self.YPaths[index])

    def __len__(self):
        # One sample per stored image path.
        return len(self.XPaths)

""" <<< 构造加载入模型所用的数据加载器 <<< """



def train(net, epoches, lr=1e-4, gpu=False):
    """
    Train the network and checkpoint the best model.

    Reads the module-level globals trainSet/validSet (for dataset sizes) and
    trainLoader/validLoader (for batches). The best model is saved to
    'Unet-best.pkl'; per-epoch train/validation losses are written to
    'lossVals.npy' and plotted via plotLoss().

    :param net: the UNet network to train
    :param epoches: number of training epochs
    :param lr: learning rate for the Adam optimizer
    :param gpu: whether to train on the GPU (if available)
    :return: None
    """
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = torch.nn.BCELoss()  # per-pixel binary cross-entropy

    device = torch.device("cuda:0" if (torch.cuda.is_available() and gpu) else "cpu")
    net = net.to(device)

    print("Start training ...")
    trainDataNum = trainSet.shape[0]
    validDataNum = validSet.shape[0]

    minLoss = float("inf")       # lowest average training loss seen so far
    minValidLoss = float("inf")  # validation loss at that best epoch
    lossVals = np.full((2, epoches), np.nan)  # row 0: train loss, row 1: valid loss
    for epoch in range(epoches):
        startTime = time.time()
        net.train()  # dropout/batch-norm (if any) in training mode
        lossSum = 0.0  # sum of batch losses within this epoch
        avgLoss = 0.0  # running mean loss of this epoch
        for i, data in enumerate(trainLoader):
            inputData, labelData = data
            inputData = inputData.to(device)
            labelData = labelData.to(device)
            optimizer.zero_grad()  # gradients must be zeroed before each backward pass
            outputs = net(inputData)
            loss = criterion(outputs.view(-1), labelData.view(-1))
            # Update the parameters.
            loss.backward()
            optimizer.step()
            # .item() extracts a detached Python float, so no autograd graph
            # or device tensor is kept alive across iterations.
            batchLoss = loss.item()
            lossSum += batchLoss
            avgLoss = lossSum / (i + 1)
            process_bar(percent=i*batchsize/trainDataNum, start_str="\tEpoch: {} |".format(epoch+1),
                        end_str="Batch: {}\tBatchLoss: {:.3f}\tAvgLoss: {:.3f}".format(i+1, batchLoss, avgLoss),
                        total_length=20)
        # Compute the validation-set loss (no gradients, inference-mode layers).
        print("\nCalculating loss of validation set ... ")
        net.eval()  # dropout/batch-norm in inference mode during validation
        validLossSum = 0.0
        validAvgLoss = 0.0
        with torch.no_grad():
            for i, data in enumerate(validLoader):
                inputData, labelData = data
                inputData = inputData.to(device)
                labelData = labelData.to(device)
                outputs = net(inputData)
                loss = criterion(outputs.view(-1), labelData.view(-1))
                batchLoss = loss.item()
                validLossSum += batchLoss
                validAvgLoss = validLossSum / (i + 1)
                process_bar(percent=i * batchsize / validDataNum, start_str="\tEpoch: {} |".format(epoch + 1),
                            end_str="Batch: {}\tBatchValidLoss: {:.3f}\tAvgValidLoss: {:.3f}".format(i + 1, batchLoss, validAvgLoss),
                            total_length=20)

        endTime = time.time()
        # Rough ETA assuming every remaining epoch takes as long as this one.
        remainTime = (endTime - startTime) * (epoches - epoch - 1)
        remainHour = remainTime // 3600
        remainMin = (remainTime % 3600) // 60
        remainSec = remainTime % 60

        lossVals[:, epoch] = avgLoss, validAvgLoss
        print("\nEpoch {} AvgLoss {:.3f} ValidAvgLoss {:.3f}".format(epoch+1, avgLoss, validAvgLoss))
        print("Estimated remaining: {}:{}:{}\n".format(int(remainHour), int(remainMin), int(remainSec)))
        # NOTE(review): checkpointing keys on the *training* loss, matching the
        # original behaviour; selecting on validAvgLoss would be the more
        # standard criterion — confirm before changing.
        if avgLoss < minLoss:
            minLoss = avgLoss
            minValidLoss = validAvgLoss
            # Save the full network object (not just the state_dict).
            torch.save(net, 'Unet-best.pkl')
    np.save("lossVals.npy", lossVals)
    plotLoss(lossFile="./lossVals.npy")
    print('Training finished ...')


def test(netFile: str, gpu=False):
    """
    Run the saved model over the module-level testLoader and dump results.

    For each sample, writes ./testOutput1/<batch>-<sample>.png containing
    [original image | ground-truth mask | binarised prediction] stacked
    horizontally.

    :param netFile: path of the saved (pickled) model file
    :param gpu: whether to run inference on the GPU (if available)
    :return: None
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoint files from a trusted source.
    if torch.cuda.is_available() and gpu:
        device = torch.device("cuda:0")
        net = torch.load(netFile)
    else:
        device = torch.device("cpu")
        net = torch.load(netFile, map_location=torch.device('cpu'))
    net = net.to(device)
    net.eval()  # dropout/batch-norm in inference mode
    os.makedirs("./testOutput1", exist_ok=True)  # hoisted out of the batch loop
    with torch.no_grad():
        for i, data in enumerate(testLoader):
            inputData, labelData = data
            inputData = inputData.to(device)
            labelData = labelData.to(device)
            outputs = net(inputData)
            # Write one side-by-side comparison image per sample in the batch.
            for j in range(inputData.shape[0]):
                origin = np.array(inputData[j, 0].data.cpu()) * 255
                label = np.array(labelData[j, 0].data.cpu()) * 255
                output = np.array(outputs[j, 0].data.cpu())
                # Binarise the predicted probabilities at 0.5.
                splitPoint = 0.5
                output[output >= splitPoint] = 255
                output[output < splitPoint] = 0

                finalImg = np.hstack((origin, label, output))
                # np.int was removed in NumPy 1.24; values are in [0, 255],
                # so uint8 is the correct 8-bit image dtype for imwrite.
                finalImg = finalImg.astype(np.uint8)
                cv.imwrite("./testOutput1/{}-{}.png".format(i, j), finalImg)


if __name__ == '__main__':
    # Collect the paths of the raw images and their matching label masks.
    trainPathsDf = getPicAndLabelPaths(imgDir=trainDataDir, labelDir=trainLabelDir, imgSuffix=".jpg", labelSuffix=".png",
                                     regExp="wind[0-9]*_[0-9]*_[0-9]*")
    testPathsDf = getPicAndLabelPaths(imgDir=testDataDir, labelDir=testLabelDir, imgSuffix=".jpg", labelSuffix=".png",
                                     regExp="wind[0-9]*_[0-9]*_[0-9]*")
    # Split into training / validation / test sets.
    # NOTE: trainSet, validSet and the three DataLoaders below are
    # module-level globals read directly by train() and test() — do not rename.
    trainSet, validSet = getTrainAndTestDataset(allDataPathDf=trainPathsDf, ratio=validRatio)
    testSet = getTrainAndTestDataset(allDataPathDf=testPathsDf, ratio=0)[0]
    trainData = InputDataSet(data=trainSet)
    validData = InputDataSet(data=validSet)
    testData = InputDataSet(data=testSet)
    trainLoader = DataLoader(trainData, batch_size=batchsize, shuffle=True)
    validLoader = DataLoader(validData, batch_size=batchsize, shuffle=True)
    testLoader = DataLoader(testData, batch_size=10, shuffle=True)

    # Train from scratch; uncomment test(...) to evaluate a saved checkpoint.
    train(net=Unet(nChannels=1, nClasses=1), epoches=epoches, gpu=True)
    # test(netFile="./Unet-best.pkl", gpu=True)
