import glob
import multiprocessing as mp
import os
import pickle
import time
import xml.etree.ElementTree as ET
from operator import index

import cv2
import matplotlib.pyplot as plt
import numpy as np
import spectral.io.envi as envi
import torch
from numpy import flip, size
from sklearn import metrics
from sshkeyboard import listen_keyboard
from torch.utils.data import Dataset as BaseDataset
from tqdm import tqdm


class listen_plot:
    """Blocking keyboard hook: pressing 'p' evaluates the model on the test
    set; releasing 'p' re-runs the stored plotting task.

    Args:
        training: object exposing evaluate(), test_loader and test_result.
        task: callable invoked on key release.
        *arg: positional arguments forwarded to `task`.
    """

    def __init__(self, training, task, *arg) -> None:
        self.task = task
        self.kwarg = arg          # tuple of positional args for `task`
        self.training = training
        # listen_keyboard blocks until the listener exits
        listen_keyboard(on_press=self.press,
                        on_release=self.release)

    def press(self, key):
        """On 'p' press: evaluate on the test loader and print the accuracy."""
        if key == 'p':
            test_acc = self.training.evaluate(self.training.test_loader, self.training.test_result)
            print('{} set Accuracy:{:.2%}'.format('Test', test_acc))
            print(f"'{key}' pressed")

    def release(self, key):
        """On 'p' release: run the stored task with the stored arguments."""
        if key == 'p':
            # BUG FIX: arguments are collected as a tuple via *arg, but were
            # unpacked with ** (mapping unpacking), which raises TypeError on
            # every release; unpack positionally instead.
            self.task(*self.kwarg)
            print(f"'{key}' released")

class DataResult():
    """Accumulates binary-classification scores and labels, then computes
    AUC / accuracy / precision / recall over them."""

    def __init__(self) -> None:
        # FIX: previously there was no __init__, so appending to y_score /
        # y_true (or calling get_metric) before an explicit refresh() raised
        # AttributeError. Initialize the buffers up front.
        self.refresh()

    def refresh(self):
        """Reset the accumulated prediction scores and ground-truth labels."""
        self.y_score = []
        self.y_true = []

    def get_metric(self):
        """Compute metrics from the accumulated buffers.

        Scores are rounded to hard 0/1 labels (pos_label=1). Results are
        stored on self: auc, ylabel_pre, accuracy_score, precision, recall.
        """
        fpr, tpr, thresholds = metrics.roc_curve(self.y_true, self.y_score, pos_label=1)
        self.auc = metrics.auc(fpr, tpr)
        self.ylabel_pre = list(map(round, self.y_score)) 
        self.accuracy_score =  metrics.accuracy_score(self.y_true, self.ylabel_pre)
        self.precision = metrics.precision_score(self.y_true, self.ylabel_pre, )
        self.recall = metrics.recall_score(self.y_true, self.ylabel_pre, )

class ProcessedData(object):
    """Plain container bundling a patch array, its ground truth and its
    pixel positions for one data split."""

    def __init__(self, patch=None, gt=None, pos=None) -> None:
        super().__init__()
        # store the three parallel pieces of one split
        self.patch, self.gt, self.pos = patch, gt, pos

def normlize(patch):
    """Min-max normalize each innermost row of `patch` to [0, 1].

    Recurses over leading axes until a 2-D slice is reached, then normalizes
    each row independently. Returns a new float array of the same shape.

    FIX: a constant row (max == min) previously divided by zero, producing
    NaN/inf; such rows now map to all zeros.
    """
    norm_patch = np.zeros(shape=patch.shape)
    for batchidx in range(patch.shape[0]):
        if len(patch.shape) > 2:
            # peel off one leading axis and recurse
            norm_patch[batchidx] = normlize(patch[batchidx])
        else:
            image = patch[batchidx, :]
            span = image.max() - image.min()
            if span == 0:
                # degenerate row: leave as zeros instead of dividing by zero
                norm_patch[batchidx, :] = 0.0
            else:
                norm_patch[batchidx, :] = (image - image.min()) / span
    return norm_patch


class DataPreProcess(object):
    """Builds (and disk-caches) a per-pixel patch cube for an image.

    For every pixel a patchsize x patchsize spatial window is extracted;
    window cells that fall outside the image are filled with the *center*
    pixel's spectrum (not edge replication). The cube is cached at
    '<datapath>Patch_<patchsize>.npy'.
    """

    def __init__(self,
                 IMAGE: np.array,
                 patchsize: int,
                 datapath: str,
                 tasknum=20) -> None:
        self.IMAGE = IMAGE          # (H, W, bands) image cube
        self.patchsize = patchsize  # side length of the square window
        self.datapath = datapath    # directory holding the .npy cache
        # NOTE(review): tasknum is only printed; no multiprocessing is
        # actually performed in this class.
        self.tasknum = tasknum
        self._build()

    def _build(self):
        """Load the cached patch cube if present, otherwise build and cache it.

        FIX: the previous existence check guarded only a print statement
        while np.load ran unconditionally, relying on the except clause for
        the missing-file case; the flow is now stated once and clearly.
        """
        cachefile = self.datapath + 'Patch_{}.npy'.format(self.patchsize)
        try:
            if os.path.exists(cachefile):
                print('读处理好的数据' + '.'*30)
            self.processeddata_patch = np.load(cachefile)
            print('Lucky Dog! Patch data already exists!')
        except (FileNotFoundError, KeyError, EOFError):
            start = time.time()
            size1, size2 = self.IMAGE.shape[:2]
            # all (row, col) coordinates in row-major order
            img_pos = np.stack(
                np.meshgrid(np.arange(size1), np.arange(size2), indexing='ij'),
                axis=-1).reshape(-1, 2).astype(int)
            # (H, W, P, P, B) -> (H, W, B, P, P) for channels-first consumers
            self.processeddata_patch = self.getpatch(img_pos, self.IMAGE, 'All').transpose(0, 1, 4, 2, 3)
            np.save(cachefile, self.processeddata_patch)
            end = time.time()
            print(end - start)
            print('Patch data has been built!')

        return self.processeddata_patch

    @classmethod
    def parsespdata(cls, spdata) -> np.array:
        """Flatten {split: {label: [positions, labels]}} into
        {split: ndarray of positions} (labels are dropped)."""
        d = {}
        for dataname, label_pos in spdata.items():
            pos = []
            for label_str, poslabel in label_pos.items():
                pos = pos + list(poslabel[0])  # poslabel[0] holds coordinates
            d[dataname] = np.array(pos)
        return d

    def getpatch(self, posdata, image, dataname):
        """Extract a patch for every pixel of `image`.

        Returns an (H, W, patchsize, patchsize, bands) array, or None when
        `posdata` is None. `posdata` is used only for the progress message;
        the full image grid is always processed.
        """
        if posdata is None:
            return None
        imgpatch = np.empty(shape=(image.shape[0], image.shape[1], self.patchsize, self.patchsize, self.IMAGE.shape[2]))
        print('=================== {0} {2} samples to process with {1} multi-process  ===================='.format(len(posdata), self.tasknum, dataname))

        print('starting')
        half = self.patchsize // 2  # hoisted loop invariant
        for x in tqdm(range(image.shape[0])):
            for y in range(image.shape[1]):
                for i in range(self.patchsize):
                    for j in range(self.patchsize):
                        r, c = x - half + i, y - half + j
                        # window cells crossing the border fall back to the
                        # center pixel's value
                        if r < 0 or r > image.shape[0] - 1 or c < 0 or c > image.shape[1] - 1:
                            imgpatch[x, y, i, j, :] = image[x, y, :]
                        else:
                            imgpatch[x, y, i, j, :] = image[r, c, :]

        return imgpatch

    def bd_subtask(self, image_classindlist, IMAGE, patchsize):
        """Extract patches only for the listed (row, col) positions.

        Returns an (N, patchsize, patchsize, bands) array with the same
        center-pixel fallback for out-of-bounds cells as getpatch().
        """
        imgpatch = np.empty(shape=(len(image_classindlist), patchsize, patchsize, IMAGE.shape[2]))
        half = patchsize // 2
        for idx, img_pos in enumerate(tqdm(image_classindlist)):
            for i in range(patchsize):
                for j in range(patchsize):
                    r, c = img_pos[0] - half + i, img_pos[1] - half + j
                    if r < 0 or r > IMAGE.shape[0] - 1 or c < 0 or c > IMAGE.shape[1] - 1:
                        imgpatch[idx, i, j, :] = IMAGE[img_pos[0], img_pos[1], :]
                    else:
                        imgpatch[idx, i, j, :] = IMAGE[r, c, :]
        return imgpatch


class MyDataset(BaseDataset):
    """Index-by-position dataset over pre-built per-pixel arrays.

    FIX: the previous docstring was copied from an unrelated CamVid segmentation
    dataset and described parameters (images_dir, masks_dir, augmentation, ...)
    that do not exist here.

    Args:
        image_npy: per-pixel sample array, indexed as image[row, col].
        gnd_npy: ground-truth label map, indexed as gnd[row, col].
        pos_npy: (N, 2) array of (row, col) coordinates; one sample each.
    """

    def __init__(self, image_npy, gnd_npy, pos_npy):
        self.image = image_npy
        self.pos = pos_npy
        self.gnd = gnd_npy
        self.length = self.pos.shape[0]  # one sample per listed position

    def __getitem__(self, i):
        """Return (sample, label) for the i-th listed position."""
        x_coor, y_coor = self.pos[i]
        image = self.image[x_coor, y_coor]
        label = self.gnd[x_coor, y_coor]
        return image, label

    def __len__(self):
        return self.length


def plot(pos: list, y_pre: list, shape, savepath):
    """Render labels as an RGB map and save it as a single-page PDF.

    Args:
        pos: iterable of (row, col) coordinates, parallel to y_pre.
        y_pre: labels; 0 -> green, 1 -> red, 2 -> blue.
        shape: shape of the source image (only shape[:2] is used).
        savepath: output path without the '.pdf' extension.
    """
    img = np.zeros(shape[:2]+(3,), dtype=int)
    # FIX: `color` was unbound (NameError) when the first label was outside
    # {0, 1, 2}, and later unknown labels silently reused the previous
    # color; unknown labels now render deterministically as black.
    color_map = {
        0: np.array([0, 255, 0]),
        1: np.array([255, 0, 0]),
        2: np.array([0, 0, 255]),
    }
    black = np.array([0, 0, 0])
    for p_idx, p in enumerate(y_pre):
        img[tuple(list(pos[p_idx]))] = color_map.get(p, black)
    from matplotlib import pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    with PdfPages(savepath + '.pdf') as pdf:
        fig = plt.figure()
        plt.imshow(img)
        height, width, channels = img.shape
        # scale the figure so one pixel maps to 1/100 inch
        fig.set_size_inches(width / 100.0, height / 100.0)
        # strip all axes/margins so only the image itself is saved
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
        plt.margins(0, 0)
        plt.axis('off')
        plt.xticks([])
        plt.yticks([])
        pdf.savefig(bbox_inches = 'tight')  # saves the current figure into a pdf page
        plt.close()
    print(savepath + '.pdf ' + 'has been saved')


def splitdata(groudtruth,
              savepath,
              trainnum=0.1,
              validnum=0.1,
              testnum=0.8,
              ):
    """Split each class of a 2-D ground-truth map into train/valid/test position sets.

    The result is cached at '<savepath>spliteddata.npy' and reloaded on later calls.

    Args:
        groudtruth: 2-D integer label map (one class id per pixel).
        savepath: directory prefix for the cache file.
        trainnum / validnum / testnum: per-class sample amounts; values < 1
            appear to act as fractions and values >= 1 as absolute counts,
            and testnum == -1 means "all remaining samples".
            NOTE(review): mixing fractions and counts across the three
            arguments is not handled consistently — confirm callers use
            all-fractions or all-counts.

    Returns:
        dict {'train'|'valid'|'test': {str(label): [positions, labels]}};
        'valid' is set to None when validnum == 0.
    """
    from sklearn.model_selection import train_test_split
    from sklearn.utils.random import sample_without_replacement

    # reuse a previously cached split if one exists
    if os.path.exists(savepath + 'spliteddata.npy'):
        print('恭喜你， 划分数据(未取patch)已经存在')
        return np.load(savepath + 'spliteddata.npy', allow_pickle=True).item()
        # with open(savepath + 'spliteddata.pkl', 'rb') as f:
            # return pickle.load(f)
            
    # flatten the label map into parallel position / label vectors
    size1 = groudtruth.shape[0]
    size2 = groudtruth.shape[1]
    img_pos = np.zeros(shape=((size1)*(size2),2), dtype=int)
    gt = np.zeros(shape=((size1)*(size2),), dtype=int)
    t = 0
    for i in range(size1):
        for j in range(size2):
           img_pos[t] = np.array([i, j])    
           gt[t] = np.array(groudtruth[i, j])
           t += 1

    spliteddata = {'train':{}, 'valid':{}, 'test':{}}
    for label in range(0, groudtruth.max()+1):
        indice_class = np.where(gt==label)[0]
        gt_class = gt[indice_class]
        imgpos_class = img_pos[indice_class]
        samplepos = None

        # testnum == -1 means: everything left after train/valid
        nte = imgpos_class.shape[0] - trainnum - validnum if testnum == -1 else testnum

        # total > 1 -> treat amounts as absolute counts and subsample that many;
        # total < 1 -> treat as a fraction of this class's pixels
        if trainnum+validnum+nte> 1 :
            samplepos = sample_without_replacement(imgpos_class.shape[0],
                                                    trainnum+validnum+nte)
        elif trainnum+validnum+nte< 1:
            samplepos = sample_without_replacement(imgpos_class.shape[0],
                                             imgpos_class.shape[0]*(trainnum+validnum+nte))
        if samplepos is not None:
            imgpos_class = imgpos_class[samplepos]
            gt_class = gt_class[samplepos]


        # NOTE(review): seeding with the current wall-clock second makes the
        # split non-reproducible across runs — confirm this is intended
        pos_train, pos_teva,\
        y_train, y_teva = train_test_split(imgpos_class, gt_class, 
                                           train_size = trainnum,
                                           random_state = time.localtime(time.time()).tm_sec, 
                                           stratify = gt_class)
        spliteddata['train'].update({str(label):[pos_train, y_train]})

        
        # share of the train remainder that goes to test (or absolute count if nte >= 1)
        testsize = nte / (validnum + nte) if nte < 1 else nte

        if validnum == 0:
            # no validation requested: the whole remainder is the test set
            pos_test, y_test = pos_teva, y_teva
            spliteddata['valid'] = None
        else:
            pos_valid, pos_test,\
            y_valid, y_test = train_test_split(pos_teva, y_teva, 
                                                    test_size = testsize,
                                                    random_state = time.localtime(time.time()).tm_sec, 
                                                    stratify = y_teva) 
            spliteddata['valid'].update({str(label):[pos_valid, y_valid]})
        spliteddata['test'].update({str(label):[pos_test, y_test]})
        
    np.save(savepath + 'spliteddata.npy', spliteddata)
    # with open(savepath + 'spliteddata.pkl', 'wb') as f:
            # pickle.dump(spliteddata, f, pickle.HIGHEST_PROTOCOL)
    print('=============split finished===============')
    return spliteddata

def setpath(dataset, trialnumber, NTr, NVa, NTe, modelname):
    """Compose the split/result/image directory paths for one experiment
    trial and create the result/image directories.

    Returns (resultpath, imagepath, datapath, rootpath).
    """
    # amounts summing to <= 1 are proportions; otherwise absolute counts
    foldertype = 'proportion' if NTe + NTr + NVa <= 1 else 'number'
    # 'a-b-c' style basename -> 'a/c'
    parts = dataset.split('/')[-1].split('-')
    dataname = parts[0] + '/' + parts[2]
    rootpath = './' + dataset.split('/')[1] + '/' + dataname + '/'
    # NTe == -1 stands for "all remaining samples"
    if NTe == -1:
        te_part = 'Te_all/%s/' % str(trialnumber)
    else:
        te_part = 'Te_%s/%s/' % (NTe, str(trialnumber))
    datapath = rootpath + 'Split/' + foldertype + '/Tr_%s/' % NTr + 'Va_%s/' % NVa + te_part
    resultpath = datapath + 'result/%s/' % modelname
    os.makedirs(resultpath, exist_ok=True)
    imagepath = datapath + 'image/%s/' % modelname
    os.makedirs(imagepath, exist_ok=True)
    return resultpath, imagepath, datapath, rootpath


def myplot(processeddata, IMAGE, imagepath, trainingresult: DataResult, testacc=''):
    """Render the prediction map: train/valid ground truth combined with the
    rounded test predictions, saved via plot() as a PDF."""
    # positions for all three splits, in train -> valid -> test order
    all_pos = (list(processeddata['train'].pos)
               + list(processeddata['valid'].pos)
               + list(processeddata['test'].pos))
    # true labels for train/valid, predicted labels for test
    all_labels = (list(processeddata['train'].gt)
                  + list(processeddata['valid'].gt)
                  + trainingresult.ylabel_pre)
    plot(np.array(all_pos), np.array(all_labels), IMAGE.shape,
         imagepath + 'testprediction' + testacc)

def get_imggnd(dataset):
    """Load an ENVI image cube and rasterize its XML polygon annotations.

    Expects '<dataset>.hdr', '<dataset>.raw' and '<dataset>.xml' to exist.
    Returns (image, mask): the vertically flipped band cube and a uint8 mask
    that is 1 inside and on the outline of every annotated polygon.
    """
    cube = envi.open('%s.hdr' % dataset, '%s.raw' % dataset)
    image = flip(np.array(cube.load()), 0)

    mask = np.zeros((image.shape[0], image.shape[1]), dtype="uint8")
    root = ET.parse('%s.xml' % dataset).getroot()
    for poly in root.findall('object/polygon'):
        xs = [int(pt.text) for pt in poly.findall('pt/x')]
        ys = [int(pt.text) for pt in poly.findall('pt/y')]
        vertices = np.array([[px, py] for px, py in zip(xs, ys)])
        # draw the outline, then fill the interior
        cv2.polylines(mask, np.int32([vertices]), 1, 1)
        cv2.fillPoly(mask, np.int32([vertices]), 1)
    return image, mask


# class MyDataPre(DataPreProcess):
#     def __init__(self,
#                  IMAGE: np.array,
#                  splitdata: dict,
#                  patchsize: int,
#                  datapath: str,
#                  tasknum=20) -> None:
#         DataPreProcess.__init__(self,IMAGE,patchsize,datapath,tasknum=20)
#         self.splitdata = splitdata

if __name__ == '__main__':
    # Ad-hoc debug entry point: load a previously pickled experiment result
    # for manual inspection.
    # NOTE(review): pickle.load executes arbitrary code from the file — only
    # safe because this path is a locally produced experiment artifact.
    with open('./pathology/pathology/roi1/Split/number/Tr_200/Va_0/Te_all/1/result/PURE3DCNN/result.pkl','rb') as f:
        a = pickle.load(f)
    
    print('end')


