import glob
import multiprocessing as mp
import os
import pickle
import time
import xml.etree.ElementTree as ET

import cv2
import matplotlib.pyplot as plt
import numpy as np
import spectral.io.envi as envi
import torch
from numpy import flip
from sklearn import metrics
from sshkeyboard import listen_keyboard
from torch.utils.data import Dataset as BaseDataset
from tqdm import tqdm


class listen_plot:
    """Keyboard-driven hook: on 'p' evaluate the model, then re-run a plot task.

    NOTE: ``listen_keyboard`` blocks, so ``__init__`` does not return while
    the listener is active.
    """

    def __init__(self, training, task, *arg) -> None:
        # `training` is expected to expose evaluate / test_loader / test_result
        # (see press below) -- confirmed only by usage here.
        self.task = task
        self.kwarg = arg          # positional args for `task` (name is historical)
        self.training = training
        listen_keyboard(on_press=self.press,
                        on_release=self.release)

    def press(self, key):
        """On 'p' press: evaluate on the test loader and print the accuracy."""
        if key == 'p':
            test_acc = self.training.evaluate(self.training.test_loader, self.training.test_result)
            print('{} set Accuracy:{:.2%}'.format('Test', test_acc))
            print(f"'{key}' pressed")

    def release(self, key):
        """On 'p' release: invoke the stored task with the stored arguments.

        Bug fix: ``self.kwarg`` is the tuple collected via ``*arg``, so it must
        be splatted with ``*``; the original ``**self.kwarg`` raised
        ``TypeError: argument after ** must be a mapping``.
        """
        if key == 'p':
            self.task(*self.kwarg)
            print(f"'{key}' released")

class DataResult():
    """Accumulate binary-classification scores/labels and compute metrics."""

    def __init__(self) -> None:
        # Fix: the class previously had no initializer, so using the buffers
        # before an explicit refresh() raised AttributeError.
        self.refresh()

    def refresh(self):
        """Reset the accumulated prediction scores and ground-truth labels."""
        self.y_score = []   # predicted positive-class scores/probabilities
        self.y_true = []    # ground-truth binary labels (pos_label == 1)

    def get_metric(self):
        """Compute AUC, accuracy, precision and recall from the buffers.

        Hard labels come from round(): scores are thresholded at 0.5 (note
        Python's banker's rounding sends exactly 0.5 to 0).
        """
        fpr, tpr, thresholds = metrics.roc_curve(self.y_true, self.y_score, pos_label=1)
        self.auc = metrics.auc(fpr, tpr)
        self.ylabel_pre = list(map(round, self.y_score))
        self.accuracy_score = metrics.accuracy_score(self.y_true, self.ylabel_pre)
        self.precision = metrics.precision_score(self.y_true, self.ylabel_pre)
        self.recall = metrics.recall_score(self.y_true, self.ylabel_pre)

class ProcessedData(object):
    """Plain container bundling patches, ground truth and pixel positions."""

    def __init__(self, patch=None, gt=None, pos=None) -> None:
        super().__init__()
        # All three fields are optional; callers may attach them later
        # (e.g. the test patch array is assigned after construction).
        self.patch = patch   # patch array, or None until computed
        self.gt = gt         # ground-truth labels
        self.pos = pos       # (row, col) pixel positions

def normlize(patch):
    """Min-max normalize each innermost row of `patch` to [0, 1].

    Recurses over leading axes until 2-D slices remain; each row of a 2-D
    slice is scaled independently.  Name kept as-is (public, misspelled).

    Fix: a constant row previously produced a divide-by-zero (NaN/inf);
    such rows now map to all zeros.
    """
    norm_patch = np.zeros(shape=patch.shape)
    for batchidx in range(patch.shape[0]):
        if len(patch.shape) > 2:
            # Peel one leading axis per recursion level.
            norm_patch[batchidx] = normlize(patch[batchidx])
        else:
            image = patch[batchidx, :]
            span = image.max() - image.min()
            if span == 0:
                norm_patch[batchidx, :] = 0.0
            else:
                norm_patch[batchidx, :] = (image - image.min()) / span
    return norm_patch


def bd_subtask(image_classindlist, IMAGE, patchsize):
    """Cut a (patchsize x patchsize) spatial patch around every listed pixel.

    Neighbours that fall outside the image are filled with the centre pixel's
    spectrum.  Returns an array of shape (N, patchsize, patchsize, bands).
    """
    half = patchsize // 2
    n_rows, n_cols, n_bands = IMAGE.shape[0], IMAGE.shape[1], IMAGE.shape[2]
    imgpatch = np.empty(shape=(len(image_classindlist), patchsize, patchsize, n_bands))
    for idx, img_indice in enumerate(tqdm(image_classindlist)):
        r0, c0 = img_indice[0], img_indice[1]
        for i in range(patchsize):
            r = r0 - half + i
            for j in range(patchsize):
                c = c0 - half + j
                if 0 <= r <= n_rows - 1 and 0 <= c <= n_cols - 1:
                    imgpatch[idx, i, j, :] = IMAGE[r, c, :]
                else:
                    # Out of bounds: pad with the centre pixel's spectrum.
                    imgpatch[idx, i, j, :] = IMAGE[r0, c0, :]
    # imgpatch = normlize(imgpatch)
    return imgpatch


class DataPreProcess(object):
    """Extract (and cache on disk) square patches around every split pixel.

    Loads 'spliteddata.npy' (written by ``splitdata``) from ``datapath``,
    cuts a patchsize x patchsize neighbourhood around each train/valid/test
    pixel of ``IMAGE`` using ``tasknum`` worker processes, and caches the
    result as 'PatchGt_<patchsize>.pkl' (test patches separately in
    'test.npy').  The result is exposed as ``self.processeddata``: a dict of
    ProcessedData keyed by 'train'/'valid'/'test'.
    """

    def __init__(self,
                 IMAGE: np.array,
                 patchsize: int,
                 datapath: str,
                 tasknum: int = 20) -> None:
        # IMAGE is indexed [row, col, band] throughout (see bd_subtask below).
        self.IMAGE = IMAGE
        self.patchsize = patchsize   # spatial side length of each square patch
        self.datapath = datapath     # directory holding the cached split/patch files
        self.tasknum = tasknum       # number of worker processes used by getpatch
        self._build()
        pass

    def _build(self):
        """Load the cached patch pickle if present, else build and cache patches."""
        try:
            with open(self.datapath + 'PatchGt_%s.pkl'%str(self.patchsize), 'rb') as f:
                pklfile = pickle.load(f)
            # pklfile = np.load(self.datapath + 'PatchGt_%s.npy'%str(self.patchsize), allow_pickle=True)
            print('Lucky Dog! Patch data already exists!')
            self.processeddata = pklfile
            # Test patches are kept out of the pickle and stored separately
            # (see np.save below), so reattach them here.
            self.processeddata['test'].patch = np.load(self.datapath + 'test.npy')

        except (FileNotFoundError, KeyError, EOFError):
            # Cache miss (or partial/corrupt cache): rebuild from the raw split.
            # with open(self.datapath + 'spliteddata.pkl', 'rb') as f:
                # splitimggt = pickle.load(f)
            splitimggt = np.load(self.datapath + 'spliteddata.npy', allow_pickle=True).item()
            trainpos, traingt = self.parsespdata(splitimggt['train'])
            testpos, testgt = self.parsespdata(splitimggt['test'])
            traingt = np.array(traingt)
            testgt = np.array(testgt)
            if splitimggt['valid'] is None:
                # splitdata() stores None for 'valid' when validnum == 0.
                validpos = np.empty((0,))
                validgt = np.empty((0,))
            else:
                validpos, validgt = self.parsespdata(splitimggt['valid'])
                validgt = np.array(validgt)
            
            # def mysort(x):
            #     score = x[0][0] + x[0][1]/10000
            #     return score
            
            # def newsequence(pklfile):
            #     pos = []
            #     gt = []
            #     for key, value in pklfile.items():
            #             pos += list(value[0])
            #             gt += list(value[1])
            #     mixdata = zip(pos, gt)
            #     mixdata_list = [(pos, gt) for pos, gt in mixdata]
            #     mixdata_list = sorted(mixdata_list, key=mysort)
            #     pos_list = [x[0] for x in mixdata_list]
            #     gt_list = [x[1] for x in mixdata_list]
            #     return np.array(pos_list), np.array(gt_list)

            # trainpos, traingt = newsequence(splitimggt['train'])
            # testpos, testgt = newsequence(splitimggt['test'])

            # Patches come back as (N, H, W, bands); the transpose yields
            # (N, bands, W, H) -- presumably channels-first for torch.  The
            # W/H swap is harmless for square patches but TODO confirm it is
            # intentional.
            trainpatch = self.getpatch(trainpos, self.IMAGE, 'Training').transpose(0, 3, 2, 1)
            validpatch = self. getpatch(validpos, self.IMAGE, 'Valid').transpose(0, 3, 2, 1)
            start = time.time()
            testpatch = self.getpatch(testpos, self.IMAGE, 'Test').transpose(0, 3, 2, 1)
            
            self.processeddata = {}
            self.processeddata['train'] = ProcessedData(trainpatch, traingt, trainpos)
            # Test patches are stored with patch=None in the pickle and saved
            # to 'test.npy' instead, then reattached in memory below.
            self.processeddata['test'] = ProcessedData(None, testgt, testpos)
            self.processeddata['valid'] = ProcessedData(validpatch, validgt, validpos)
            self.processeddata['test'].patch = testpatch
            np.save(self.datapath + 'test.npy', testpatch)
            with open(self.datapath + 'PatchGt_%s.pkl'%str(self.patchsize), 'wb') as f:
                pickle.dump(self.processeddata, f, pickle.HIGHEST_PROTOCOL)
            end = time.time()
            # Elapsed seconds for the test-patch extraction only.
            print(end-start)
            print('Patch data has been built!')


    @classmethod
    def parsespdata(cls, spdata) -> tuple:
        """Flatten a per-class dict {label: (positions, labels)} into two lists.

        Returns (pos, gt): concatenated positions and labels across classes.
        """
        pos = []
        gt = []
        for _, value in spdata.items():
            pos = pos + list(value[0])
            gt = gt + list(value[1])
        return pos, gt

    def getpatch(self, posdata, image, dataname):
        """Extract patches for all positions using self.tasknum processes.

        Returns None when posdata is None; otherwise an array of shape
        (len(posdata), patchsize, patchsize, bands).  `dataname` is only used
        in the progress message.
        """
        if posdata is None:
            return None
        else:
            sample_number = len(posdata)
            imgpatch = np.empty(shape=(sample_number, self.patchsize, self.patchsize, self.IMAGE.shape[2]))
            # Ceil-ish division: each worker handles `interval` consecutive samples.
            interval = sample_number//self.tasknum
            interval += 1
            print('=================== {0} {2} samples to process with {1} multi-process  ===================='.format(len(posdata), self.tasknum, dataname))
            
            # NOTE(review): sharelst is a Manager ListProxy.  Indexing the
            # proxy (sharelst[0]) returns a *pickled copy* of the stored numpy
            # array, so the in-place writes performed in bd_subtask
            # (sharelst[0][idx, ...] = ...) may never reach the shared object,
            # and the array returned below could still be the uninitialized
            # np.empty buffer.  Verify the patches are actually populated;
            # shared memory (mp.Array / shared_memory) would be the usual fix.
            sharelst = mp.Manager().list()
            sharelst.append(imgpatch) 
            processPool = []
            print('starting')
            for i in range(self.tasknum):
                processPool.append(mp.Process(target=self.bd_subtask, 
                                            args=(posdata, i, interval,
                                             image, self.patchsize, sharelst)))
              
            for p in processPool:
                p.start()
            
            for p in processPool:
                p.join()
            
            
            return sharelst[0]

    def bd_subtask(self, posdata, startnumber, interval, IMAGE, patchsize, sharelst):
        """Worker: fill patches for samples [startnumber*interval, (startnumber+1)*interval).

        Mirrors the module-level bd_subtask: neighbours falling outside the
        image are padded with the centre pixel's spectrum.  Writes into the
        array held by sharelst[0] (but see the proxy-copy NOTE in getpatch).
        """
        for idx in tqdm(range(startnumber*interval, min((startnumber+1)*interval, len(posdata)))):
            img_pos = posdata[idx]
            for i in range(patchsize):
                for j in range(patchsize):
                    if any([img_pos[0]-patchsize//2 + i  < 0,
                            img_pos[0]-patchsize//2 + i > IMAGE.shape[0] - 1,
                            img_pos[1]-patchsize//2 + j < 0,
                            img_pos[1]-patchsize//2 + j > IMAGE.shape[1] - 1]):
                        sharelst[0][idx, i, j, :] = IMAGE[img_pos[0], img_pos[1], :]
                    else:
                        sharelst[0][idx, i, j, :] = IMAGE[img_pos[0]-patchsize//2 + i, 
                                                        img_pos[1]-patchsize//2 + j, :]
      


class MyDataset(BaseDataset):
    """In-memory torch Dataset over pre-extracted patches and labels.

    Args:
        images_npy: numpy array of patches, first axis = sample index.
        label_npy: numpy array with one label per sample.
        cuda: when True, move both tensors to the default CUDA device up front.
    """

    def __init__(self, images_npy, label_npy, cuda=True):
        images = torch.from_numpy(images_npy)
        labels = torch.from_numpy(label_npy)
        if cuda:
            # Keep the whole dataset resident on the GPU for the run.
            images = images.to('cuda')
            labels = labels.to('cuda')
        self.images = images
        self.labels = labels
        self.length = self.labels.shape[0]

    def __getitem__(self, i):
        # Cast to float32 per sample, as the training loop expects.
        return self.images[i].float(), self.labels[i]

    def __len__(self):
        return self.length


def plot(pos: list, y_pre: list, shape, savepath):
    """Render per-pixel class predictions as an RGB image and save it as a PDF.

    Args:
        pos: sequence of (row, col) pixel coordinates.
        y_pre: predicted class label per position (expected 0, 1 or 2).
        shape: shape of the source image; only shape[:2] is used.
        savepath: output path without extension; '.pdf' is appended.
    """
    # Fixed palette: class 0 green, 1 red, 2 blue.
    # Bug fix: the blue component was 2550 (outside 0-255) -- now 255.
    palette = {
        0: np.array([0, 255, 0]),
        1: np.array([255, 0, 0]),
        2: np.array([0, 0, 255]),
    }
    img = np.zeros(shape[:2] + (3,), dtype=int)
    for p_idx, p in enumerate(y_pre):
        color = palette.get(int(p))
        if color is None:
            # Unknown label: leave the pixel black instead of silently reusing
            # the previous iteration's color (or raising NameError on the
            # first iteration), as the original if-chain did.
            continue
        img[tuple(list(pos[p_idx]))] = color
    # pyplot is already imported module-wide as plt; only PdfPages is local.
    from matplotlib.backends.backend_pdf import PdfPages
    with PdfPages(savepath + '.pdf') as pdf:
        fig = plt.figure()
        plt.imshow(img)
        height, width, channels = img.shape
        fig.set_size_inches(width / 100.0, height / 100.0)
        # Strip all axes, ticks and margins so the PDF page is just the image.
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
        plt.margins(0, 0)
        plt.axis('off')
        plt.xticks([])
        plt.yticks([])
        pdf.savefig(bbox_inches='tight')  # saves the current figure into a pdf page
        plt.close()
    print(savepath + '.pdf ' + 'has been saved')


def splitdata(image,
              groudtruth,
              savepath,
              trainnum=0.1,
              validnum=0.1,
              testnum=0.8,
              ):
    """Split every pixel into train/valid/test sets, stratified per class label.

    Args:
        image: H x W x bands cube; only its spatial shape is used here.
        groudtruth: H x W integer label map (parameter name kept as-is;
            it is part of the public interface).
        savepath: directory where 'spliteddata.npy' is cached/loaded.
        trainnum / validnum / testnum: per-class absolute counts (sum > 1)
            or fractions (sum < 1) -- inferred from the branching below;
            testnum == -1 means "all remaining samples".  validnum == 0
            disables the validation split.

    Returns:
        dict with keys 'train'/'valid'/'test', each mapping str(label) ->
        (positions, labels); 'valid' is None when validnum == 0.
    """
    from sklearn.model_selection import train_test_split
    from sklearn.utils.random import sample_without_replacement

    if os.path.exists(savepath + 'spliteddata.npy'):
        # Message (Chinese, kept verbatim): "Congrats, the split data
        # (without patches) already exists".
        print('恭喜你， 划分数据(未取patch)已经存在')
        return np.load(savepath + 'spliteddata.npy', allow_pickle=True).item()
        # with open(savepath + 'spliteddata.pkl', 'rb') as f:
            # return pickle.load(f)
            
    size1 = image.shape[0]
    size2 = image.shape[1]
    # Flatten the label map into parallel (row, col) / label arrays.
    img_pos = np.zeros(shape=((size1)*(size2),2), dtype=int)
    gt = np.zeros(shape=((size1)*(size2),), dtype=int)
    t = 0
    for i in range(size1):
        for j in range(size2):
           img_pos[t] = np.array([i, j])    
           gt[t] = np.array(groudtruth[i, j])
           t += 1

    spliteddata = {'train':{}, 'valid':{}, 'test':{}}
    # One independent split per class label 0..max (stratified by construction).
    for label in range(0, groudtruth.max()+1):
        indice_class = np.where(gt==label)[0]
        gt_class = gt[indice_class]
        imgpos_class = img_pos[indice_class]
        samplepos = None

        nte = imgpos_class.shape[0] - trainnum - validnum if testnum == -1 else testnum

        # Subsample this class first when the requested total (count or
        # fraction) does not cover the whole class.  NOTE(review): the exact
        # sum == 1 case falls through with samplepos = None, i.e. uses every
        # pixel of the class -- presumably intentional for fraction mode.
        if trainnum+validnum+nte> 1 :
            samplepos = sample_without_replacement(imgpos_class.shape[0],
                                                    trainnum+validnum+nte)
        elif trainnum+validnum+nte< 1:
            samplepos = sample_without_replacement(imgpos_class.shape[0],
                                             imgpos_class.shape[0]*(trainnum+validnum+nte))
        if samplepos is not None:
            imgpos_class = imgpos_class[samplepos]
            gt_class = gt_class[samplepos]


        # train_size accepts either an absolute count (>1) or a fraction.
        # NOTE(review): seeding with the current wall-clock second makes the
        # split non-reproducible -- confirm this is intended.
        pos_train, pos_teva,\
        y_train, y_teva = train_test_split(imgpos_class, gt_class, 
                                           train_size = trainnum,
                                           random_state = time.localtime(time.time()).tm_sec, 
                                           stratify = gt_class)
        spliteddata['train'].update({str(label):(pos_train, y_train)})

        
        # Fraction mode: renormalize the test share over the valid+test pool.
        testsize = nte / (validnum + nte) if nte < 1 else nte

        if validnum == 0:
            # No validation split: the whole remainder becomes the test set.
            pos_test, y_test = pos_teva, y_teva
            spliteddata['valid'] = None
        else:
            pos_valid, pos_test,\
            y_valid, y_test = train_test_split(pos_teva, y_teva, 
                                                    test_size = testsize,
                                                    random_state = time.localtime(time.time()).tm_sec, 
                                                    stratify = y_teva) 
            spliteddata['valid'].update({str(label):(pos_valid, y_valid)})
        spliteddata['test'].update({str(label):(pos_test, y_test)})
        
    np.save(savepath + 'spliteddata.npy', spliteddata)
    # with open(savepath + 'spliteddata.pkl', 'wb') as f:
            # pickle.dump(spliteddata, f, pickle.HIGHEST_PROTOCOL)
    print('=============split finished===============')
    return spliteddata

def setpath(dataset, trialnumber, NTr, NVa, NTe, modelname):
    """Build (and create on disk) the result/image/data directories for one trial.

    Args:
        dataset: path like './<root>/<name>-<x>-<roi>...'; the second path
            segment and the 1st/3rd dash-separated parts of the last segment
            form the data directory name.
        trialnumber: trial index appended to the split path.
        NTr / NVa / NTe: train/valid/test sizes; a sum <= 1 selects the
            'proportion' folder family, otherwise 'number'.  NTe == -1 maps
            to the 'Te_all' folder.
        modelname: subfolder under result/ and image/.

    Returns:
        (resultpath, imagepath, datapath) -- all ending with '/'.
    """
    foldertype = 'proportion' if NTe + NTr + NVa <= 1 else 'number'
    dataname = dataset.split('/')[-1]
    dataname = dataname.split('-')[0] + '/' + dataname.split('-')[2]
    # Only the final 'Te_*' component differs between the two cases.
    te_part = 'Te_all/%s/' % str(trialnumber) if NTe == -1 else 'Te_%s/%s/' % (NTe, str(trialnumber))
    datapath = ('./' + dataset.split('/')[1] + '/' + dataname + '/Split/'
                + foldertype + '/Tr_%s/' % NTr + 'Va_%s/' % NVa + te_part)
    resultpath = datapath + 'result/%s/' % modelname
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(resultpath, exist_ok=True)
    imagepath = datapath + 'image/%s/' % modelname
    os.makedirs(imagepath, exist_ok=True)
    return resultpath, imagepath, datapath

def myplot(processeddata, IMAGE, imagepath, trainingresult: DataResult, testacc=''):
    """Plot train + valid ground truth together with the test predictions.

    Positions come from all three splits; labels use the real train/valid
    ground truth but the *predicted* labels (ylabel_pre) for the test split.
    """
    all_pos = list(processeddata['train'].pos)
    all_pos += list(processeddata['valid'].pos)
    all_pos += list(processeddata['test'].pos)
    all_lab = list(processeddata['train'].gt)
    all_lab += list(processeddata['valid'].gt)
    all_lab += list(trainingresult.ylabel_pre)
    plot(np.array(all_pos), np.array(all_lab), IMAGE.shape, imagepath + 'testprediction' + testacc)

def get_imggnd(dataset):
    """Load an ENVI cube and rasterize its XML polygon ROIs into a binary mask.

    Args:
        dataset: path prefix; '<dataset>.hdr'/'.raw' hold the cube and
            '<dataset>.xml' the polygon annotations.

    Returns:
        (cube, mask): the vertically flipped image array and a uint8 H x W
        mask with 1 inside (and on the outline of) every polygon.
    """
    cube = envi.open('%s.hdr' % dataset, '%s.raw' % dataset)
    image = np.array(cube.load())
    image = flip(image, 0)
    root = ET.parse('%s.xml' % dataset).getroot()

    mask = np.zeros((image.shape[0], image.shape[1]), dtype="uint8")
    for poly in root.findall('object/polygon'):
        xs = [int(node.text) for node in poly.findall('pt/x')]
        ys = [int(node.text) for node in poly.findall('pt/y')]
        vertices = np.array([[x, y] for x, y in zip(xs, ys)])
        # Draw the outline, then fill the interior, both with value 1.
        cv2.polylines(mask, np.int32([vertices]), 1, 1)
        cv2.fillPoly(mask, np.int32([vertices]), 1)
    return image, mask


# class MyDataPre(DataPreProcess):
#     def __init__(self,
#                  IMAGE: np.array,
#                  splitdata: dict,
#                  patchsize: int,
#                  datapath: str,
#                  tasknum=20) -> None:
#         DataPreProcess.__init__(self,IMAGE,patchsize,datapath,tasknum=20)
#         self.splitdata = splitdata

if __name__ == '__main__':
    # Ad-hoc debug entry point: load a previously saved result pickle so it
    # can be inspected in a debugger.
    result_file = './pathology/pathology/roi1/Split/number/Tr_200/Va_0/Te_all/1/result/PURE3DCNN/result.pkl'
    with open(result_file, 'rb') as fh:
        a = pickle.load(fh)

    print('end')


