# -*- coding: utf-8 -*-
"""
@project: CloudDetection
@author: Wu Yue
@file: utils
@ide: PyCharm
@create time: 2020-10-05 21:09
@change time: 2021-01-31 11:11
@function: 工具函数
"""

import os, re
import cv2 as cv
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from PIL import Image

# import pydensecrf.densecrf as dcrf
# from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral

import torch


def getImgsPaths(imgDir: str, regExp: str, suffix: str):
    """
    Recursively collect image files whose names match a regex and map
    base name (suffix stripped) -> path.

    :param imgDir: directory to walk recursively
    :param regExp: regular expression the file name (including suffix) must match
    :param suffix: file-name suffix stripped off to build the dict key
    :return: dict mapping base file name to a one-element list with the full
             path (list-wrapped so the dict can feed
             pd.DataFrame.from_dict(orient="index"))
    """
    imgsPath = {}  # key: base file name   val: [full path]
    for root, dirs, files in os.walk(imgDir):
        for file in files:
            if re.match(regExp, file):
                filename = file.split(suffix)[0]
                # BUG FIX: plain `root + file` drops the path separator
                # whenever `root` has no trailing slash — which is true for
                # every subdirectory yielded by os.walk. os.path.join is
                # always correct and leaves top-level paths unchanged.
                imgsPath[filename] = [os.path.join(root, file)]
    return imgsPath


def getPicAndLabelPaths(imgDir: str, labelDir: str, imgSuffix: str, labelSuffix: str, regExp: str):
    """
    Build a DataFrame pairing each source image path with its label path.

    Usage:
    getPicAndLabelPaths(imgDir="./grayPics/data/", labelDir="./grayPics/label/",
                        imgSuffix=".jpg", labelSuffix=".png",
                        regExp="wind[0-9]*_[0-9]*")

    :param imgDir: directory holding the source images
    :param labelDir: directory holding the label images
    :param imgSuffix: suffix of the source images
    :param labelSuffix: suffix of the label images
    :param regExp: file-name regular expression, without suffix
    :return: DataFrame indexed by base file name with columns "data" and "label"
    """
    dataDf = pd.DataFrame.from_dict(
        data=getImgsPaths(imgDir=imgDir, regExp=regExp + imgSuffix, suffix=imgSuffix),
        orient="index", columns=["data"])
    labelDf = pd.DataFrame.from_dict(
        data=getImgsPaths(imgDir=labelDir, regExp=regExp + labelSuffix, suffix=labelSuffix),
        orient="index", columns=["label"])

    # join="inner" keeps only names present in both directories
    return pd.concat([dataDf, labelDf], axis=1, join="inner")


def getTrainAndValidDataset(allDataPathDf, ratio=0.2):
    """
    Randomly split the data paths into a training set and a validation set.

    :param allDataPathDf: DataFrame with the file paths of all samples
    :param ratio: fraction of rows assigned to the validation set
    :return: (trainSet, validSet) tuple of DataFrames
    """
    # DataFrame.sample(frac=1) shuffles the rows in place of
    # sklearn.utils.shuffle — same effect, but the split now depends only on
    # pandas, which this module already requires.
    shuffled = allDataPathDf.sample(frac=1)
    total = shuffled.shape[0]
    validNum = int(total * ratio)
    trainNum = total - validNum

    return shuffled.iloc[:trainNum], shuffled.iloc[trainNum:]


def getTestDataset(allDataPathDf):
    """
    Build the test set: the data is handed back unchanged and unshuffled so
    predictions stay aligned with the original file order.

    :param allDataPathDf: DataFrame with the file paths of all test samples
    :return: the same object, untouched
    """
    return allDataPathDf

def myLoader(imgPath):
    """
    Load a single image from disk and return it as a float tensor in CHW layout.

    :param imgPath: path of one image file
    :return: torch.FloatTensor of shape (C, H, W); a grayscale image gets a
             singleton channel dimension. Pixel values above 1 are assumed to
             be 8-bit and are scaled down to [0, 1].
    """
    array = np.array(Image.open(imgPath))
    isGray = array.ndim == 2
    if array.ndim == 3:
        # color image: HWC -> CHW (equivalent to the two swapaxes calls)
        array = np.transpose(array, (2, 0, 1))
    if np.nanmax(array) > 1:
        # raw 8-bit values — normalize to [0, 1]
        array = array / 255.0
    tensor = torch.from_numpy(array)
    if isGray:
        # grayscale image: add the missing channel dimension
        tensor = torch.unsqueeze(tensor, dim=0)
    return tensor.float()


def process_bar(percent, start_str='', end_str='', total_length=0):
    """
    Render a one-line text progress bar, overwriting the current line.

    :param percent: progress as a fraction in [0, 1]
    :param start_str: text printed before the bar
    :param end_str: text printed after the percentage
    :param total_length: bar width in character cells
    """
    # each filled cell is three spaces wrapped in an ANSI reverse-video escape
    cell = "\033[7;31;40m%s\033[0m" % '   '
    filled = int(percent * total_length)
    bar = cell * filled
    line = '\r' + start_str + bar.ljust(total_length) \
           + ' {:0>4.1f}% | '.format(percent * 100) + end_str
    print(line, end='', flush=True)


def plotLoss(lossFile, name):
    """
    Plot the training / validation loss curves and save them as a PDF.

    :param lossFile: .npy file holding [trainLoss, validLoss]
    :param name: output file name (no extension); saved as ./<name>.pdf
    """
    curves = np.load(lossFile)

    # shared font settings
    labelFont = {'family': 'Times New Roman', 'size': 20}
    legendFont = {'family': 'Times New Roman', 'size': 12}
    ticksFontSize = 15

    fig = plt.figure(figsize=[8, 5])
    for series, color, label in ((curves[0], "b", "Training set"),
                                 (curves[1], "k", "Validation set")):
        plt.plot(series, c=color, ls="-", label=label)
    plt.legend(loc="upper right", prop=legendFont)
    plt.xticks(fontproperties='Times New Roman', size=ticksFontSize)
    plt.xlabel("Epoch", fontdict=labelFont)
    plt.yticks(fontproperties='Times New Roman', size=ticksFontSize)
    plt.ylabel("Loss", fontdict=labelFont)
    plt.savefig(
        "./{}.pdf".format(name), format="pdf", dpi=1000, bbox_inches='tight')
    plt.close(fig)


# def denseCrf(predImgs, labelImgs, nLabels):
#     """
#     对预测的结果进行 全连接条件随机场 (dense crf) 处理
#     :param predImgs: 预测的结果 (N, 1, H, W)
#     :param labelImgs: label (N, 1, H, W)
#     :param nLabels: 分类个数
#     :return:
#     """
#     N, C, H, W = labelImgs.shape
#
#     d = dcrf.DenseCRF2D(W, H, nLabels)  # width, height, nLabels
#
#     for i in range(N):
#         predImg, labelImg = predImgs[i, 0], labelImgs[i, 0]
#
#         predClassesImg = np.zeros((nLabels, H, W))  # 将 predImg 每个类别分开
#         labelClassesImg = np.zeros((nLabels, H, W))  # 将 labelImg 每个类别分开
#
#         for c in range(nLabels):
#             temp = predImg.copy()
#             temp[predImg!=c] = 0
#             temp[predImg==c] = 1
#             predClassesImg[c] = temp
#             temp = labelImg.copy()
#             temp[labelImg!=c] = 0
#             temp[labelImg==c] = 1
#             labelClassesImg[c] = temp
#
#         # 一元势
#         U = unary_from_softmax(predClassesImg)
#         U = np.ascontiguousarray(U)  # ascontiguousarray 函数将一个内存不连续存储的数组转换为内存连续存储的数组，使得运行速度更快
#         d.setUnaryEnergy(U)
#
#         # 二元势
#         labelImg = np.ascontiguousarray(labelImg)
#         pairwiseEnergy = create_pairwise_bilateral()

