import argparse
import atexit
import multiprocessing
import os
import pickle
import sys
import warnings
import xml.etree.ElementTree as ET
from copy import deepcopy
from multiprocessing import Manager, Pipe
from threading import Thread

import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import spectral.io.envi as envi
import torch
import torch.multiprocessing as mp
from linformer import Linformer
from numpy import flip
from sshkeyboard import listen_keyboard

from AutoGPU import autoGPU, testGPU
from comparetools.global_module.network import CDCNN_network as CDCNN
from hongdanfeng.demo import gain_neighborhood_band
from hongdanfeng.vit_hong import ViT as hongViT
from models import (_1DCNN, _2DCNN, _3DCNN, _3DCNN_1DCNN, _3DCNN_AM, PURE1DCNN,
                    PURE2DCNN, PURE3DCNN, PURE3DCNN_2AM, SAE, SAE_AM,
                    DBDA_network, HamidaEtAl, LeeEtAl, SSRN_network, _2dCNN,
                    myknn, mysvm)
from myViT import ViT as dismodel
from NNViT import ViT as NNViT
from NViT import ViT as NViT
from training_utils import TrainProcess, setup_seed
from utils import *


class MyThread(Thread):
    """Background worker that services plot requests from the trainer.

    Blocks on the module-level ``parentPipe``; when the training process
    sends a ``('plot', best_valid_acc)`` message it reloads the best
    weights into the auxiliary model, evaluates the test set, pickles the
    result object and renders the plot.  Communicates entirely through
    module-level globals (``parentPipe``, ``T``, ``T_aux``, ``resultpath``,
    ``IMAGE``, ``imagepath``), so it must only run from this script.
    """

    def __init__(self):
        # Takes no arguments: all state is read from module globals.
        super().__init__()

    def run(self):
        while True:
            try:
                recv = parentPipe.recv()
            except EOFError:
                # The sending end was closed: no further messages can ever
                # arrive, so exit instead of busy-looping on the error
                # (the original printed the exception forever).
                break
            except Exception as e:
                print(e)
                continue
            try:
                if recv[0] == 'plot':
                    T_aux.model.load_state_dict(T.bestmodel)
                    print('评估验证集精度为%{:.2f}对应的测试集'.format(100*recv[1]))
                    test_acc, _ = T_aux.evaluate(T.test_loader, T_aux.test_result)
                    print('{} set Accuracy:{:.2%}'.format('Test', test_acc))
                    T_aux.test_result.refresh()
                    with open(resultpath + 'result.pkl', 'wb') as f:
                        pickle.dump(T_aux.test_result, f, pickle.HIGHEST_PROTOCOL)
                    # NOTE(review): `processeddata` is never defined in this
                    # script (only `patchdata`/`spliteddata` are), so this
                    # call likely raises NameError, which the except below
                    # silently prints — confirm the intended argument.
                    myplot(processeddata, IMAGE, imagepath, T_aux.test_result,'_%{:.4f}'.format(100*test_acc))
                    print('#'*100 + 'plot successfully' + '#'*100)
            except Exception as e:
                # Keep the worker alive across evaluation/plot failures,
                # but surface the error instead of hiding it.
                print(e)
            

if __name__ == '__main__':
    print('='*30)
    # Fix the RNG seed and silence warnings once.  (The original repeated
    # these three setup lines twice, leaking the first ArgumentParser.)
    setup_seed(1993)
    warnings.filterwarnings("ignore")

    # Command-line interface: dataset location, split ratios and model choice.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str,  default='./pathology/data/032370b-20x-roi4', metavar='D',
                        help='the dataset path you load')
    parser.add_argument('--trial_number', type=int,  default=1, metavar='T',
                        help='the time you do this trial')
    parser.add_argument('--train_number', type=str,  default='0.05', metavar='NTr',
                        help='number of training set')
    parser.add_argument('--valid_number', type=str,  default='0.01', metavar='NVa',
                        help='number of valid set')
    parser.add_argument('--test_number', type=str,  default='0.94', metavar='NTe',
                        help='number of test set')
    parser.add_argument('--patchsize', type=int,  default=9, metavar='P',
                        help='patchsize of data')
    parser.add_argument('--loadbestmodel', type=int,  default=0, metavar='L',
                        help='whether load model')
    parser.add_argument('--modelname', type=str,  default='NDIS_MODEL', metavar='P', help='which model to choose')
    parser.add_argument('--gpu_ids', type=int,  default=6, metavar='G',
                        help='which gpu to use')
    parser.add_argument('--gpu_num', type=int,  default=1, metavar='G',
                        help='how many gpus to use')
    parser.add_argument('--depth', type=int,  default=5, metavar='G',
                        help='depth of NViT')
    # Make CUDA device numbering match nvidia-smi's PCI bus order.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    args = parser.parse_args()

    gpu_ids = args.gpu_ids
    dataset = args.dataset
    NTr = args.train_number          # split sizes are kept as strings and
    trialnumber = args.trial_number  # eval()'d later (ratio vs count)
    NTe = args.test_number
    NVa = args.valid_number
    gpu_num = args.gpu_num
    depth = args.depth
    patchsize = args.patchsize
    modelname = args.modelname
    load_bestmodel = args.loadbestmodel

    text = '训练比例为 训练集{} 验证集{} 测试集{}'.format(NTr, NVa, NTe)
    print(text)
    print('模型为{}'.format(modelname))

    # Load the hyperspectral cube (ENVI .hdr/.raw pair) and its XML
    # polygon annotations.
    dark_ref = envi.open('%s.hdr'%dataset, '%s.raw'%dataset)
    dark_nparr = np.array(dark_ref.load())
    tree = ET.parse('%s.xml'%dataset)
    polygon = tree.getroot().findall('object/polygon')

    # Rasterize every annotated polygon into a binary ground-truth mask
    # (1 inside a polygon, 0 elsewhere).
    mask = np.zeros((dark_nparr.shape[0], dark_nparr.shape[1]), dtype="uint8")
    for p in polygon:
        # Each <pt> contributes one (x, y) vertex; the original shadowed
        # `x`/`y` with both Element lists and ints — avoid that here.
        xs = [int(node.text) for node in p.findall('pt/x')]
        ys = [int(node.text) for node in p.findall('pt/y')]
        cor_xy = np.array(list(zip(xs, ys)))
        cv2.polylines(mask, np.int32([cor_xy]), 1, 1)
        cv2.fillPoly(mask, np.int32([cor_xy]), 1)

    # NOTE(review): eval() on CLI strings is unsafe on untrusted input; it
    # is kept because '0.05' vs '200' must yield float vs int (ratio vs
    # absolute count) — confirm against splitdata/setpath before changing.
    resultpath, imagepath, datapath, rootpath = setpath(dataset, trialnumber, eval(NTr),
                                                eval(NVa), eval(NTe), modelname)
    # Flip vertically so image rows match the annotation coordinate frame,
    # then scale intensities into a normalized range.
    IMAGE = np.flipud(dark_nparr)
    IMAGE = normlize(IMAGE)
    print('输入图像归一化， 最大值为{}'.format(IMAGE.max()))
    GND = mask

    # Registry of model names -> zero-argument factories.  Classes act as
    # their own factory; entries needing constructor arguments are wrapped
    # in lambdas.  (The original dict eagerly instantiated four networks
    # just to select one, wasting memory and startup time.)
    model_factories = {
              'SAE': SAE,
              'PURE1DCNN': PURE1DCNN,
              'PURE2DCNN': PURE2DCNN,
              'PURE3DCNN': PURE3DCNN,
              'DBDA': DBDA_network,
              '1DCNN': _1DCNN,
              'CDCNN': CDCNN,
              'SSRN': SSRN_network,
              'DIS_MODEL': dismodel,
              'NDIS_MODEL': lambda: NViT(depth=3),
              'NNDIS_MODEL': NNViT,
              'DanfengViT': hongViT,
              'DanfengViT3': lambda: hongViT(depth=3),
              'SimpledanfengViT': lambda: hongViT(mode='ViT'),
              'NViTBaseline': lambda: NViT(num_classes=2, depth=depth)}

    # Instantiate only the requested model (KeyError on an unknown name,
    # as before).
    model = model_factories[modelname]()



    spliteddata = splitdata(GND, datapath, trainnum=eval(NTr), validnum=eval(NVa), testnum=eval(NTe))

    patchdata = DataPreProcess(IMAGE, patchsize, rootpath, 1).processeddata_patch

    patchdata = patchdata.astype(np.float32)


    parentPipe, childPipe = Pipe(True)
    # childPipe.close()
    # dis_model = myViT
    # if modelname == 'danfengViT':
    #     processeddata['train'].patch = gain_neighborhood_band(processeddata['train'].patch, 60, 3, 9).transpose(0, 2, 1)
    #     processeddata['valid'].patch = gain_neighborhood_band(processeddata['valid'].patch, 60, 3, 9).transpose(0, 2, 1)
    #     processeddata['test'].patch = gain_neighborhood_band(processeddata['test'].patch, 60, 3, 9).transpose(0, 2, 1)

    if gpu_ids != -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ids)
    else:
        while True:
            try:
                autoGPU(gpu_num, 'auto')
                break
            except Exception as e :
                print(e)
                pass
    # print('patch 的尺寸为') 
    # print(processeddata['train'].patch.shape)
    
    if load_bestmodel:
        print('读了最佳模型, 继续训练')
        modelpara = torch.load(resultpath + 'bestmodel.pth')
        try:
            model.load_state_dict(modelpara)
        except RuntimeError:
            model.load_state_dict({k.replace('module.', ''):v for k,v in modelpara.items()})

    
    t = MyThread()
    t.setDaemon(True) 
    t.start()
    parentPipe, childPipe = Pipe()
    if gpu_num > 1:
        model = torch.nn.DataParallel(model)


    #=======================================
    T = TrainProcess(model=model,
                     modelname=modelname,
                     processeddata=[patchdata, GND, spliteddata],
                     train_config='./config_normal.yaml',
                     plotwrapper=[IMAGE, imagepath, text],
                     resultpath=resultpath,
                     pipesend=childPipe
                     )
    model_aux = deepcopy(model)
    T_aux = TrainProcess(model_aux)
    #=======================================



   
    
    T.training_start()
    with open(resultpath + 'result.pkl', 'wb') as f:
        pickle.dump(T.test_result, f, pickle.HIGHEST_PROTOCOL)
    # myplot(processeddata, IMAGE, imagepath, T.test_result)
