# -*- coding: utf-8 -*-
"""
@project: CloudDetection
@author: Wu Yue
@file: train-RGB
@ide: PyCharm
@create time: 2020-10-05 20:59
@change time:
@function: 
"""
import time, os

import numpy as np
import cv2 as cv

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from network.unet import Unet
from network.deeplab import DeepLabV1, DeepLabV2, DeepLabV3, DeepLabV3Plus
from network.deeplabV3PRedNet import DeepLabV3PlusRedNet
from network.danet import DANet, get_danet
from utils import *

# import  matplotlib.pyplot as plt


""" >>> 参数设置 >>> """

trainDataDir = "F:\张杰\Data\image0301/train/data/"
trainLabelDir = "F:\张杰\Data\image0301/train/label/"
testDataDir = "F:\张杰\Data\image0301/test/data/"
testLabelDir = "F:\张杰\Data\image0301/test/data/"


epoches = 60
batchsize = 1

validRatio = 0.05  # 验证集比例

""" <<< 参数设置 <<< """


""" >>> 构造加载入模型所用的数据加载器 >>> """


class InputDataSet(Dataset):
    """Dataset that lazily loads (image, label) pairs from path columns.

    Wraps a DataFrame-like split (trainSet / validSet / testSet) whose
    "data" and "label" columns hold file paths; each item is loaded on
    demand via ``loader``.
    """

    def __init__(self, data, loader=myLoader):
        """
        :param data: trainSet or testSet (DataFrame with "data"/"label" columns)
        :param loader: callable mapping a file path to a loaded array/tensor
        """
        self.XPaths = data["data"].values
        self.YPaths = data["label"].values
        self.loader = loader

    def __getitem__(self, index):
        # Load both halves of the sample only when requested.
        img = self.loader(self.XPaths[index])
        mask = self.loader(self.YPaths[index])
        return img, mask

    def __len__(self):
        # One sample per image path.
        return self.XPaths.shape[0]


""" <<< 构造加载入模型所用的数据加载器 <<< """

def adjust_learning_rate_poly(epoch, num_epochs, base_lr, power):
    """Polynomial ("poly") learning-rate schedule.

    Decays the learning rate from ``base_lr`` toward 0 as ``epoch``
    approaches ``num_epochs``:

        lr = base_lr * (1 - epoch / num_epochs) ** power
    """
    decay = (1 - epoch / num_epochs) ** power
    return base_lr * decay


def train(net, epoches, lr=1e-3, gpu=False, modelName="Unet", multiGPU=True):
    """
    Train ``net``, track per-epoch train/validation loss, and save the
    parameters of the epoch with the lowest average training loss to
    ``{modelName}-best.pkl``; the loss history is saved to
    ``lossVals-{modelName}.npy``.

    Relies on module-level globals: ``trainLoader`` / ``validLoader``
    (DataLoaders), ``trainSet`` / ``validSet`` (used only for their row
    counts) and ``batchsize`` (progress-bar arithmetic).

    :param net: network to train (must output values in (0, 1) -- BCELoss)
    :param epoches: number of training epochs
    :param lr: base learning rate for the poly schedule
    :param gpu: use CUDA if available
    :param modelName: prefix for the saved checkpoint / loss files
    :param multiGPU: wrap ``net`` in DataParallel when more than one GPU exists
    :return: None
    """
    criterion = torch.nn.BCELoss()  # binary segmentation loss

    device = torch.device("cuda:0" if (torch.cuda.is_available() and gpu) else "cpu")
    if multiGPU and (torch.cuda.device_count() > 1):
        print("We are using {} GPUs!".format(torch.cuda.device_count()))
        net = torch.nn.DataParallel(net).cuda()
    net = net.to(device)

    # FIX: build the optimizer once. The previous code re-created Adam every
    # epoch, silently discarding its running first/second-moment estimates;
    # the poly schedule is now applied by mutating the param groups' lr.
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)

    print("Start training ...")
    trainDataNum = trainSet.shape[0]
    validDataNum = validSet.shape[0]

    minLoss = float("inf")       # best (lowest) average training loss so far
    minValidLoss = float("inf")  # validation loss at that same best epoch
    lossVals = np.full((2, epoches), np.nan)  # row 0: train loss, row 1: valid loss
    for epoch in range(epoches):
        # Poly learning-rate decay for this epoch.
        polyLr = adjust_learning_rate_poly(epoch=epoch + 1, num_epochs=epoches, base_lr=lr, power=2)
        for paramGroup in optimizer.param_groups:
            paramGroup["lr"] = polyLr

        startTime = time.time()
        lossSum = 0.0  # sum of batch losses within this epoch
        avgLoss = 0.0  # running average loss of this epoch
        net.train()  # FIX: ensure dropout / batch-norm run in training mode
        for i, data in enumerate(trainLoader):
            inputData, labelData = data
            inputData = inputData.to(device)
            labelData = labelData.to(device)
            optimizer.zero_grad()  # gradients must be cleared before backward()
            outputs = net(inputData)

            # Resample the label to the network's output resolution.
            labelDataSameReso = F.interpolate(input=labelData, size=outputs.shape[-2:])
            loss = criterion(outputs.view(-1), labelDataSameReso.view(-1))
            # Update the parameters.
            loss.backward()
            optimizer.step()
            # FIX: accumulate plain floats via .item() instead of the
            # deprecated .data, so no graph tensors are kept alive.
            lossSum += loss.item()
            avgLoss = lossSum / (i + 1)
            process_bar(percent=(i + 1) * batchsize / trainDataNum, start_str="\tEpoch: {} |".format(epoch + 1),
                        end_str="Batch: {}\tBatchLoss: {:.3f}\tAvgLoss: {:.3f}".format(i + 1, loss.item(), avgLoss),
                        total_length=20)
        # Compute the validation-set loss for this epoch.
        print("\nCalculating loss of validation set ... ")
        validLossSum = 0.0
        validAvgLoss = 0.0
        net.eval()  # FIX: dropout / batch-norm must be in eval mode for validation
        with torch.no_grad():
            for i, data in enumerate(validLoader):
                inputData, labelData = data
                inputData = inputData.to(device)
                labelData = labelData.to(device)
                outputs = net(inputData)

                labelDataSameReso = F.interpolate(input=labelData, size=outputs.shape[-2:])
                loss = criterion(outputs.view(-1), labelDataSameReso.view(-1))
                validLossSum += loss.item()
                validAvgLoss = validLossSum / (i + 1)
                process_bar(percent=(i + 1) * batchsize / validDataNum, start_str="\tEpoch: {} |".format(epoch + 1),
                            end_str="Batch: {}\tBatchValidLoss: {:.3f}\tAvgValidLoss: {:.3f}".format(i + 1, loss.item(), validAvgLoss),
                            total_length=20)

        endTime = time.time()
        # One epoch's wall time extrapolated over the remaining epochs.
        remainTime = (endTime - startTime) * (epoches - epoch - 1)
        remainHour = remainTime // 3600
        remainMin = (remainTime % 3600) // 60
        remainSec = remainTime % 60

        lossVals[:, epoch] = avgLoss, validAvgLoss
        print("\nEpoch {} AvgLoss {:.3f} ValidAvgLoss {:.3f}".format(epoch + 1, avgLoss, validAvgLoss))
        print("Estimated remaining: {}:{}:{}\n".format(int(remainHour), int(remainMin), int(remainSec)))
        if avgLoss < minLoss:
            minLoss = avgLoss
            minValidLoss = validAvgLoss
            # Save only the parameters of the current best model.
            if multiGPU and (torch.cuda.device_count() > 1):
                # Unwrap DataParallel so the checkpoint loads into a bare net.
                torch.save(net.module.state_dict(), '{}-best.pkl'.format(modelName))
            else:
                torch.save(net.state_dict(), '{}-best.pkl'.format(modelName))
    np.save("lossVals-{}.npy".format(modelName), lossVals)
    print('Training finished ...')


def test(net, netFile: str, gpu=False, modelName="Unet", outputStyle=0, labelFilenames=None):
    """
    Run the trained network over the global ``testLoader`` and write PNGs
    into ``./testOutput-{modelName}/Style-{outputStyle}/``.

    :param net: model architecture (weights are loaded from ``netFile``)
    :param netFile: path to the saved ``state_dict`` file
    :param gpu: use CUDA if available
    :param modelName: used in the output directory name
    :param outputStyle: 0 -> write "original | label | prediction" stitched
                        side by side; 1 -> write only the binarized prediction
    :param labelFilenames: output file stems, indexed in testLoader order
    :return: None
    """
    if torch.cuda.is_available() and gpu:
        device = torch.device("cuda:0")
        net.load_state_dict(torch.load(netFile))
    else:
        device = torch.device("cpu")
        net.load_state_dict(torch.load(netFile, map_location=torch.device('cpu')))
    net = net.to(device)
    net.eval()  # FIX: inference must run with dropout/batch-norm in eval mode
    with torch.no_grad():
        currentIndex = 0
        for i, data in enumerate(testLoader):
            inputData, labelData = data
            inputData = inputData.to(device)
            labelData = labelData.to(device)
            outputs = net(inputData)
            # Write out each sample of the batch.
            for j in range(inputData.shape[0]):
                origin = np.array(inputData[j].data.cpu()) * 255
                label = np.array(labelData[j, 0].data.cpu()) * 255
                output = np.array(outputs[j, 0].data.cpu())

                # Binarize the probability map at 0.5 into {0, 255}.
                splitPoint = 0.5
                output[output >= splitPoint] = 255
                output[output < splitPoint] = 0

                if outputStyle == 0:
                    if origin.shape[0] == 1:
                        origin = origin[0]  # single channel: drop the channel axis
                    else:
                        # CHW -> HWC, then RGB -> BGR for OpenCV.
                        origin = np.swapaxes(origin, 0, 1)
                        origin = np.swapaxes(origin, 1, 2)
                        origin = origin[:, :, ::-1]
                        # Replicate the single-channel masks to 3 channels for hstack.
                        label = np.expand_dims(label, axis=2).repeat(3, axis=2)
                        output = np.expand_dims(output, axis=2).repeat(3, axis=2)

                    # Stitch original | label | prediction horizontally.
                    finalImg = np.hstack((origin, label, output))
                    # FIX: np.int was removed in NumPy >= 1.24 (this line
                    # crashed); pixel values are 0..255, so uint8 is the
                    # correct image dtype for cv.imwrite.
                    finalImg = np.array(finalImg, dtype=np.uint8)

                    styleDir = "Style-0"
                    if not os.path.exists("./testOutput-{}/{}".format(modelName, styleDir)):
                        os.makedirs("./testOutput-{}/{}".format(modelName, styleDir))
                    print(labelFilenames[currentIndex])
                    cv.imwrite("./testOutput-{}/{}/{}.png".format(modelName, styleDir, labelFilenames[currentIndex]), finalImg)
                    currentIndex += 1
                elif outputStyle == 1:
                    styleDir = "Style-1"
                    if not os.path.exists("./testOutput-{}/{}".format(modelName, styleDir)):
                        os.makedirs("./testOutput-{}/{}".format(modelName, styleDir))
                    print(labelFilenames[currentIndex])
                    cv.imwrite("./testOutput-{}/{}/{}.png".format(modelName, styleDir, labelFilenames[currentIndex]), output)
                    currentIndex += 1


if __name__ == '__main__':
    # Collect the image paths and their matching label paths.
    trainPathsDf = getPicAndLabelPaths(imgDir=trainDataDir, labelDir=trainLabelDir, imgSuffix=".jpg", labelSuffix=".png",
                                     regExp="wind[0-9]*_[0-9]*[_]*[0-9]*")
    # NOTE(review): labelSuffix here is ".jpg" and (per the config) testLabelDir
    # points at the test *data* directory -- confirm the test split is really
    # meant to be label-free / self-labelled.
    testPathsDf = getPicAndLabelPaths(imgDir=testDataDir, labelDir=testLabelDir, imgSuffix=".jpg", labelSuffix=".jpg",
                                     regExp="wind[0-9]*_[0-9]*[_]*[0-9]*")
    # Split into train / validation / test sets and wrap them in DataLoaders.
    trainSet, validSet = getTrainAndValidDataset(allDataPathDf=trainPathsDf, ratio=validRatio)
    testSet = getTestDataset(allDataPathDf=testPathsDf)
    trainData = InputDataSet(data=trainSet)
    validData = InputDataSet(data=validSet)
    testData = InputDataSet(data=testSet)
    trainLoader = DataLoader(trainData, batch_size=batchsize, shuffle=True)
    validLoader = DataLoader(validData, batch_size=batchsize, shuffle=True)
    testLoader = DataLoader(testData, batch_size=5, shuffle=False)

    # FIX: DataFrame._stat_axis is a private pandas attribute that was removed
    # in pandas 2.x; the public .index holds the same row names, i.e. the file
    # stems without suffix.
    labelFilenames = testSet.index.values.tolist()

    # Run training and testing; uncomment the model configuration you want.

    # Unet
    # train(net=Unet(nChannels=3, nClasses=1), epoches=epoches, gpu=True, modelName="Unet")
    # test(net=Unet(nChannels=3, nClasses=1), netFile="./Unet-best.pkl", gpu=True, modelName="Unet",
    #      outputStyle=0, labelFilenames=labelFilenames)
    # test(net=Unet(nChannels=3, nClasses=1), netFile="./Unet-best.pkl", gpu=True, modelName="Unet",
    #      outputStyle=1, labelFilenames=labelFilenames)

    # DeepLab V1
    # train(net=DeepLabV1(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], originReso=True),
    #       epoches=epoches, gpu=True, modelName="DeepLabV1")
    # test(net=DeepLabV1(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], originReso=True),
    #      netFile="./DeepLabV1-best.pkl", gpu=True, modelName="DeepLabV1")

    # DeepLab V2
    # train(net=DeepLabV2(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24], originReso=True),
    #       epoches=epoches, gpu=True, modelName="DeepLabV2")
    # test(net=DeepLabV2(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24], originReso=True),
    #      netFile="./DeepLabV2-best.pkl", gpu=True, modelName="DeepLabV2")

    # DeepLab V3
    # train(net=DeepLabV3(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                     multiGrids=[1, 2, 4], outputStride=8, originReso=True),
    #       epoches=epoches, gpu=True, modelName="DeepLabV3")
    # test(net=DeepLabV3(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                     multiGrids=[1, 2, 4], outputStride=8, originReso=True),
    #      netFile="./DeepLabV3-best.pkl", gpu=True, modelName="DeepLabV3")

    # DeepLab V3+
    # train(net=DeepLabV3Plus(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                         multiGrids=[1, 2, 4], outputStride=8),
    #       epoches=epoches, gpu=True, modelName="DeepLabV3Plus")
    # test(net=DeepLabV3Plus(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                        multiGrids=[1, 2, 4], outputStride=8),
    #      netFile="./DeepLabV3Plus-best.pkl", gpu=True, modelName="DeepLabV3Plus")

    # DeepLab V3+ RedNet (the active configuration)
    train(net=DeepLabV3PlusRedNet(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
                                multiGrids=[1, 2, 4], outputStride=8),
         epoches=epoches, gpu=True, modelName="DeepLabV3PlusRedNet")

    # test(net=DeepLabV3PlusRedNet(nChannels=3, nClasses=1, nBlocks=[3, 4, 23, 3], atrousRates=[6, 12, 18, 24],
    #                              multiGrids=[1, 2, 4], outputStride=8),
    #      netFile="./DeepLabV3PlusRedNet-best.pkl", gpu=True, modelName="DeepLabV3PlusRedNet",
    #      outputStyle=1, labelFilenames=labelFilenames)

    # DANet
    # train(net=get_danet(), epoches=epoches, gpu=True, modelName="DANet", multiGPU=False)  # DANet
    # test(net=get_danet(), netFile="./DANet-best.pkl", gpu=True, modelName="DANet")
