"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from torchvision import transforms
import cv2
import torch
from skimage.metrics import structural_similarity as compare_ssim
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
from skimage.metrics import roc_curve
from openpyxl import load_workbook
import pandas as pd

def roc_plot(label=None, prediction= None,title = None, filename = None, savedir = False):
    """Compute a ROC curve from binary labels and scores, then plot it.

    The original body referenced undefined names (``fpr``, ``tpr``,
    ``roc_auc``, ``plt``) and crashed on every call; the curve is now
    computed with numpy and matplotlib is imported lazily so the
    computation also works headless.

    Parameters:
        label (array-like)      -- binary ground-truth labels (0/1)
        prediction (array-like) -- scores; higher means "more positive"
        title (str)             -- optional plot title (default 'ROC Curve')
        filename (str)          -- optional file name for saving the figure
        savedir (str/False)     -- optional directory for the saved figure

    Returns:
        (fpr, tpr, roc_auc) -- the curve points and the area under it.
    """
    y = np.asarray(label).ravel()
    score = np.asarray(prediction).ravel()
    # Sweep the threshold from high to low score.
    order = np.argsort(-score, kind='stable')
    y = y[order]
    pos = y.sum()
    neg = y.size - pos
    # Cumulative true/false positive rates; max(.,1) avoids 0/0 when a
    # class is absent.
    tpr = np.concatenate(([0.0], np.cumsum(y) / max(pos, 1)))
    fpr = np.concatenate(([0.0], np.cumsum(1 - y) / max(neg, 1)))
    roc_auc = float(np.trapz(tpr, fpr))

    try:
        import matplotlib.pyplot as plt  # lazy: plotting is optional
    except ImportError:
        return fpr, tpr, roc_auc

    plt.plot(fpr, tpr, 'k--', label='ROC (area = {0:.2f})'.format(roc_auc), lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(title if title is not None else 'ROC Curve')
    plt.legend(loc="lower right")
    if savedir and filename:
        plt.savefig(os.path.join(savedir, filename))
    else:
        plt.show()
    return fpr, tpr, roc_auc

def save_csv(datadict,csvdir,csvfile,sheetname,rowname,colname):
    """Write ``datadict[row][col]`` values into one sheet of an .xlsx workbook.

    Parameters:
        datadict (dict) -- nested mapping, datadict[rowkey][colkey] -> number
        csvdir (str)    -- output directory
        csvfile (str)   -- workbook file name without extension
        sheetname (str) -- sheet to (re)write; also the label-column header
        rowname (list)  -- row keys, in output order
        colname (list)  -- column keys, in output order
    """
    table = np.zeros((len(rowname), len(colname)))
    for i, rowkey in enumerate(rowname):
        for j, colkey in enumerate(colname):
            table[i, j] = datadict[rowkey][colkey]

    data = pd.DataFrame(table, columns=list(colname))
    data.insert(0, sheetname, rowname)

    path = os.path.join(csvdir, csvfile + '.xlsx')
    # ``writer.save()`` and assigning ``writer.book`` were removed in
    # pandas 2.x; append mode preserves existing sheets of the workbook
    # while replacing this one.
    if os.path.exists(path):
        writer_args = dict(mode='a', if_sheet_exists='replace')
    else:
        writer_args = dict(mode='w')
    with pd.ExcelWriter(path, **writer_args) as writer:
        data.to_excel(writer, sheet_name=sheetname, float_format='%.6f', index=False)
    
def LD_param(TP,TN,FP,FN,beta=1):
    """Per-category liveness-detection metrics from confusion counts.

    Parameters:
        TP, TN, FP, FN (dict) -- per-category confusion counts; all four
                                 dicts share the same keys
        beta (float)          -- F-score weighting (beta=1 gives F1)

    Returns:
        dict of dicts keyed by metric name ('Accurate', 'Precision',
        'Recall', 'F_score', 'APCER', 'NPCER', 'ACER'), each mapping
        category -> value. Every division falls back to 0 when its
        denominator is 0.

    For the 'all' and '真人' (genuine) categories the positive class is the
    live one (TP-based precision/recall); for attack categories the roles
    are swapped (TN-based). Fix: removed the dead ``np.zeros((8,7))``
    buffer, which was never returned and raised IndexError whenever more
    than 8 categories were passed in.
    """
    Accuracy, Precision, Recall, F1_score, APCER, NPCER, ACER = {}, {}, {}, {}, {}, {}, {}
    beta = beta * beta  # squared once, as in the F-beta formula

    for key in TP.keys():
        total = TP[key] + TN[key] + FP[key] + FN[key]
        Accuracy[key] = (TP[key] + TN[key]) / total if total else 0

        if key == 'all' or key == '真人':
            p_den = TP[key] + FP[key]
            Precision[key] = TP[key] / p_den if p_den else 0
            r_den = TP[key] + FN[key]
            Recall[key] = TP[key] / r_den if r_den else 0
        else:
            p_den = TN[key] + FN[key]
            Precision[key] = TN[key] / p_den if p_den else 0
            r_den = TN[key] + FP[key]
            Recall[key] = TN[key] / r_den if r_den else 0

        # Precision/Recall are non-negative, so the denominator below is 0
        # exactly when their sum is 0.
        if (Precision[key] + Recall[key]) == 0:
            F1_score[key] = 0
        else:
            F1_score[key] = (1 + beta) * Precision[key] * Recall[key] / (beta * Precision[key] + Recall[key])

        a_den = TN[key] + FP[key]
        APCER[key] = FP[key] / a_den if a_den else 0
        n_den = FN[key] + TP[key]
        NPCER[key] = FN[key] / n_den if n_den else 0
        ACER[key] = (APCER[key] + NPCER[key]) / 2

    return {
        'Accurate': Accuracy,
        'Precision': Precision,
        'Recall': Recall,
        'F_score': F1_score,
        'APCER': APCER,
        'NPCER': NPCER,
        'ACER': ACER,
    }

class AverageMeter(object):
    """Tracks the latest value, sum, count and running mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # (weighted) sum of all values seen
        self.count = 0  # total weight / number of samples

    def update(self, val, n=1):
        """Record value ``val`` with weight ``n`` (e.g. a batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def update_np(self, val):
        """Record a batch of samples stacked along axis 0 of ``val``.

        Fix: accumulate into ``sum`` — the original assigned
        ``self.sum = np.sum(val, 0)``, discarding everything gathered by
        earlier calls and corrupting ``avg``.
        """
        self.sum += np.sum(val, 0)
        self.count += val.shape[0]
        self.avg = self.sum / self.count

def SSIM(pred,real):
    """Structural similarity between a prediction and its ground truth.

    numpy arrays are compared directly; torch tensors are first converted
    to HxWx3 uint8 images with tensor2im and compared over the channel
    axis. Returns None for mixed or unsupported input types.
    """
    if isinstance(pred, np.ndarray) and isinstance(real, np.ndarray):
        return compare_ssim(pred, real)
    elif isinstance(pred, torch.Tensor) and isinstance(real, torch.Tensor):
        p, r = tensor2im(pred), tensor2im(real)
        try:
            # scikit-image >= 0.19 deprecated (and later removed) the
            # 'multichannel' keyword in favour of 'channel_axis'.
            return compare_ssim(p, r, channel_axis=-1)
        except TypeError:
            return compare_ssim(p, r, multichannel=True)
    else:
        return None

def PSNR(pred,real):
    """Peak signal-to-noise ratio between a prediction and its ground truth.

    numpy arrays are compared directly; torch tensors are first converted
    to uint8 images with tensor2im. Returns None for mixed or unsupported
    input types.

    (Removed a stale commented-out manual implementation that contained a
    ``pred.shap`` typo; skimage's compare_psnr is the live path.)
    """
    if isinstance(pred, np.ndarray) and isinstance(real, np.ndarray):
        return compare_psnr(pred, real)
    elif isinstance(pred, torch.Tensor) and isinstance(real, torch.Tensor):
        return compare_psnr(tensor2im(pred), tensor2im(real))
    else:
        return None

def SNR(pred,real):
    """Signal-to-noise ratio in dB: 10*log10(var(signal) / var(noise)).

    Fix: the difference is now computed inside the type-checked branches,
    so unsupported input types reach the ``None`` fallback instead of
    raising from the subtraction on the first line.

    NOTE(review): torch.var defaults to the unbiased (n-1) estimator while
    np.var uses the biased (n) one, so the two branches can differ slightly
    for identical data — behaviour preserved from the original.
    """
    if isinstance(pred, torch.Tensor) and isinstance(real, torch.Tensor):
        delta = pred - real
        return 10 * torch.log10(torch.var(real) / torch.var(delta))
    elif isinstance(pred, np.ndarray) and isinstance(real, np.ndarray):
        delta = pred - real
        return 10 * np.log10(np.var(real) / np.var(delta))
    else:
        return None
    
def normal(img,r_max,r_min):
    """Linearly rescale ``img`` onto the range [r_min, r_max].

    Parameters:
        img (ndarray/tensor) -- input with .min()/.max() methods
        r_max, r_min         -- target range bounds

    Fix: a constant input (max == min) previously divided by zero,
    producing nan/inf; it now maps every element to ``r_min``.
    """
    lo = img.min()
    hi = img.max()
    if hi == lo:
        return np.full_like(img, r_min, dtype=np.float64)
    return (img - lo) * (r_max - r_min) / (hi - lo) + r_min

def tensor2im(input_image, imtype=np.uint8):
    """Convert a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type)        -- the desired type of the converted numpy array
    """
    if isinstance(input_image, np.ndarray):
        # Already a numpy image: just cast to the requested dtype.
        return input_image.astype(imtype)
    if not isinstance(input_image, torch.Tensor):
        # Anything else is passed through untouched.
        return input_image
    # Take the first image of the batch as a CPU float numpy array.
    arr = input_image.data[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Replicate a single grayscale channel into RGB.
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then map the [-1, 1] range onto [0, 255].
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(imtype)


def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute(gradients)

    Parameters:
        net (torch network) -- Torch network
        name (str) -- the name of the network
    """
    # Mean absolute gradient of each parameter that actually has one.
    grad_means = [torch.mean(torch.abs(p.grad.data))
                  for p in net.parameters() if p.grad is not None]
    mean = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(mean)


def save_image(image_numpy, image_path, aspect_ratio=1.0, Tensor_img=True):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- rescale one side by this factor first
        Tensor_img (bool)         -- unused; kept for caller compatibility
    """
    image_pil = Image.fromarray(image_numpy)
    shape = image_numpy.shape
    if len(shape) == 3:
        h, w = shape[0], shape[1]
    elif len(shape) == 2:
        h, w = shape

    # NOTE(review): PIL's resize takes (width, height); passing (h, ...)
    # here mirrors the original code exactly — confirm the intended axis
    # convention with the callers.
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)


def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat),
                 np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)


def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths (a single path
                            string is also accepted)
    """
    # Normalize to a list, then create each directory in turn.
    if isinstance(paths, list) and not isinstance(paths, str):
        path_list = paths
    else:
        path_list = [paths]
    for p in path_list:
        mkdir(p)


def mkdir(path):
    """Create a single directory (including parents) if it doesn't exist.

    Parameters:
        path (str) -- a single directory path

    Fix: uses ``exist_ok=True`` instead of an ``os.path.exists`` pre-check,
    which was racy — two processes could both see the directory missing and
    one would then crash with FileExistsError.
    """
    os.makedirs(path, exist_ok=True)

def histeq(im,nbr_bins = 256):
    """Histogram-equalize a grayscale image.

    Parameters:
        im (ndarray)   -- input grayscale image
        nbr_bins (int) -- number of histogram bins

    Returns:
        (equalized image with the same shape as ``im``, the CDF used).

    Fix: the ``normed=`` keyword was removed from np.histogram in
    NumPy 1.24; ``density=True`` computes the same normalized histogram.
    """
    imhist, bins = np.histogram(im.flatten(), nbr_bins, density=True)
    # Cumulative distribution, rescaled so its top maps to 255.
    cdf = imhist.cumsum()
    cdf = 255.0 * cdf / cdf[-1]
    # Map pixel values through the CDF by linear interpolation.
    im2 = np.interp(im.flatten(), bins[:-1], cdf)
    return im2.reshape(im.shape), cdf

def linear(im,nbr_bins=256):
    """Linearly stretch ``im`` so its value range maps onto [0, 256].

    Returns the stretched image together with ``nbr_bins`` + 1 uniformly
    spaced bin edges covering [0, 256] (same interface as ``histeq``).
    """
    lo = np.min(im)
    hi = np.max(im)
    stretched = 256 / (hi - lo) * (im - lo)
    edges = np.arange(nbr_bins + 1) * int(256 / nbr_bins)
    return stretched, edges

def image2gt(img,group=256,mode='histeq'):
    """Quantize image gray levels into ``group`` integer class labels.

    Parameters:
        img (tensor) -- image tensor; a 4-d NCHW batch or a single image
        group (int)  -- number of gray-level groups / output classes
        mode (str)   -- name of the binning function ('histeq' or 'linear')

    Returns:
        ndarray of per-pixel labels in [1, group] (stacked along axis 0
        when the input was a batch).

    NOTE(review): ``eval(mode)`` executes an arbitrary expression — keep
    ``mode`` restricted to trusted values ('histeq'/'linear').
    """
    if len(img.shape) == 4:
        gt = []
        for k in range(img.shape[0]):
            # Convert one image of the batch to grayscale, then bin it.
            _img = cv2.cvtColor(tensor2im(img[k:k+1,:,:,:]), cv2.COLOR_BGR2GRAY)
            im2,bins = eval(mode)(_img,group)
            last_level = 0
            gt += [np.zeros(im2.shape)]
            for i,next_level in enumerate(bins):
                # Pixels whose mapped value falls in (last, next] get label i+1.
                idx = np.where((im2>=last_level) & (im2<=next_level))
                gt[k][idx] = i+1
                last_level = next_level
        gt = np.array(gt)
    else:
        img = cv2.cvtColor(tensor2im(img), cv2.COLOR_BGR2GRAY)
        # Fix: this branch previously called eval(mode)(_img, group) with
        # ``_img`` undefined (NameError); use the converted ``img``.
        im2,bins = eval(mode)(img,group)
        last_level = 0
        gt = np.zeros(im2.shape)
        for i,next_level in enumerate(bins):
            idx = np.where((im2>=last_level) & (im2<=next_level))
            gt[idx] = i+1
            last_level = next_level

    return gt
def per_pix_acc(gt,pred):
    """Fraction of pixels where ``pred`` exactly matches ``gt``.

    For a batched (3-d) ``gt`` the result is a per-image array of
    accuracies; for a single 2-d label map it is a scalar.
    """
    if len(gt.shape) == 3:
        total = gt.shape[1] * gt.shape[2]
        matches = np.array([np.count_nonzero(gt[k] == pred[k])
                            for k in range(gt.shape[0])])
        return matches / total
    total = gt.shape[0] * gt.shape[1]
    return np.count_nonzero(gt == pred) / total