import argparse
import ast
import atexit
import multiprocessing
import os
import pickle
import time
import warnings
import xml.etree.ElementTree as ET
from copy import deepcopy
from multiprocessing import Manager, Pipe
from threading import Thread

import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import spectral.io.envi as envi
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp
import torch.nn as nn
import torch.utils.data as Data
from linformer import Linformer
from matplotlib import colors
from numpy import flip
from scipy.io import loadmat, savemat
from sklearn.metrics import confusion_matrix
from sshkeyboard import listen_keyboard
from torch import optim
from torch.autograd import Variable

from AutoGPU import autoGPU, testGPU
from hongdanfeng.demo import gain_neighborhood_band
from hongdanfeng.vit_hong import ViT as hongViT
from models import (_1DCNN, _2DCNN, _3DCNN, _3DCNN_1DCNN, _3DCNN_AM, PURE1DCNN,
                    PURE2DCNN, PURE3DCNN, PURE3DCNN_2AM, SAE, SAE_AM,
                    DBDA_network, HamidaEtAl, LeeEtAl, SSRN_network, _2dCNN,
                    myknn, mysvm)
from NViT import Spa_Spe_ViT, myViT
from training_utils import TrainProcess, setup_seed
from utils import DataPreProcess, listen_plot, myplot, plot, setpath, splitdata

if __name__ == '__main__':
    print('=' * 30)

    # One-time experiment setup: fixed seed for reproducibility, silence
    # library warnings. (The original ran these — and created a second,
    # shadowing ArgumentParser — twice; the duplicates are removed.)
    setup_seed(1993)
    warnings.filterwarnings("ignore")

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='./pathology/data/032370b-20x-roi2', metavar='D',
                        help='the dataset path you load')
    parser.add_argument('--trial_number', type=int, default=1, metavar='T',
                        help='the time you do this trial')
    parser.add_argument('--train_number', type=str, default='2000', metavar='NTr',
                        help='number of training set')
    parser.add_argument('--valid_number', type=str, default='1000', metavar='NVa',
                        help='number of valid set')
    parser.add_argument('--test_number', type=str, default='1000', metavar='NTe',
                        help='number of test set')
    parser.add_argument('--patchsize', type=int, default=9, metavar='P',
                        help='patchsize of data')
    parser.add_argument('--loadbestmodel', type=int, default=0, metavar='L',
                        help='whether load model')
    parser.add_argument('--modelname', type=str, default='danfengViT', metavar='P',
                        help='which model to choose')
    parser.add_argument('--gpu_ids', type=int, default=-1, metavar='G',
                        help='which gpu to use')

    # Make CUDA device ids match the physical (nvidia-smi) ordering.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    args = parser.parse_args()
    gpu_ids = args.gpu_ids  # NOTE(review): parsed but never used below — kept for future GPU selection

    dataset = args.dataset
    trialnumber = args.trial_number
    NTr = args.train_number
    NVa = args.valid_number
    NTe = args.test_number
    print('训练比例为 训练集{} 验证集{} 测试集{}'.format(NTr, NVa, NTe))
    patchsize = args.patchsize
    modelname = args.modelname
    load_bestmodel = args.loadbestmodel
    print('模型为{}'.format(modelname))

    # Load the hyperspectral cube (ENVI .hdr/.raw pair) and flip it
    # vertically along axis 0.
    dark_ref = envi.open('%s.hdr' % dataset, '%s.raw' % dataset)
    dark_nparr = np.array(dark_ref.load())
    dark_nparr = flip(dark_nparr, 0)

    # Build a binary ground-truth mask from the polygon annotations in the
    # accompanying XML file (one <object>/<polygon> per annotated region,
    # vertices as <pt><x>/<y> pairs).
    tree = ET.parse('%s.xml' % dataset)
    polygons = tree.getroot().findall('object/polygon')

    mask = np.zeros((dark_nparr.shape[0], dark_nparr.shape[1]), dtype="uint8")
    for poly in polygons:
        # Pair the x/y vertex coordinates into an (N, 2) integer array.
        cor_xy = np.array([[int(px.text), int(py.text)]
                           for px, py in zip(poly.findall('pt/x'),
                                             poly.findall('pt/y'))])
        cv2.polylines(mask, np.int32([cor_xy]), 1, 1)
        cv2.fillPoly(mask, np.int32([cor_xy]), 1)

    # ast.literal_eval safely parses the numeric CLI strings (e.g. '2000' or
    # '0.1') where the original used eval(), which executes arbitrary input.
    resultpath, imagepath, datapath = setpath(dataset, trialnumber,
                                              ast.literal_eval(NTr),
                                              ast.literal_eval(NVa),
                                              ast.literal_eval(NTe), modelname)

    IMAGE = np.flipud(dark_nparr)
    GND = mask
    # NOTE(review): splitdata receives the raw *string* counts, matching the
    # original call — presumably it converts internally; confirm.
    spliteddata = splitdata(IMAGE, GND, datapath, trainnum=NTr, validnum=NVa, testnum=NTe)

    processeddata = DataPreProcess(IMAGE, patchsize, datapath, 1).processeddata

    # Re-group every split's patches into band-neighbourhood form
    # (num_patches=60, band_patch=3, patch edge 9) expected by the ViT below.
    for split in ('train', 'valid', 'test'):
        processeddata[split].patch = gain_neighborhood_band(
            processeddata[split].patch, 60, 3, 9)

    model = hongViT(image_size=9,
                    near_band=1,
                    num_patches=60,
                    num_classes=2,
                    dim=64,
                    depth=5,
                    heads=4,
                    mlp_dim=8,
                    dropout=0.1,
                    emb_dropout=0.1,
                    mode='CAF')

    # Smoke-test: push a dummy batch (2 samples, 60 patches, 81 = 9*9 pixels)
    # through the model; the tensor is created on CPU by default.
    dummy = torch.tensor(np.ones((2, 60, 81))).float()
    out = model(dummy)
    print('end')

