import argparse
import os
import pickle
import warnings
import xml.etree.ElementTree as ET
from plistlib import loads

import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import spectral.io.envi as envi
import torch
from linformer import Linformer
from numpy import flip
from sklearn import model_selection, svm
from sklearn.neighbors import KNeighborsClassifier

from AutoGPU import autoGPU
from comparetools.global_module.network import CDCNN_network as CDCNN
from hongdanfeng.vit_hong import ViT as hongViT
from models import (_1DCNN, _2DCNN, _3DCNN, _3DCNN_1DCNN, _3DCNN_AM, PURE1DCNN,
                    PURE2DCNN, PURE3DCNN, PURE3DCNN_2AM, SAE, SAE_AM,
                    DBDA_network, HamidaEtAl, LeeEtAl, SSRN_network, _2dCNN,
                    myknn, mysvm)
from NNViT import ViT as NNViT
from NViT import ViT as NViT
from training_utils import TrainProcess, setup_seed
from utils import (DataPreProcess, DataResult, get_imggnd, listen_plot, myplot,
                   plot, setpath, splitdata)

# from vit_pytorch.efficient import ViT




if __name__ == '__main__':
    # Evaluation entry point: parse CLI args, pick a GPU, build the chosen
    # model, optionally restore the best checkpoint, preprocess the HSI cube
    # into patches and run evaluation on the test split.
    print('='*30)
    warnings.filterwarnings("ignore")

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str,  default='./pathology/data/032370b-20x-roi2', metavar='D',
                        help='the dataset path you load')
    parser.add_argument('--trial_number', type=int,  default=1, metavar='T',
                        help='the time you do this trial')
    # NOTE: the three split sizes are fractions of the dataset, so they must
    # parse as float — the original declared type=int, which would reject any
    # fractional CLI value (e.g. `--train_number 0.01` -> "invalid int value").
    parser.add_argument('--train_number', type=float,  default=0.01, metavar='NTr',
                        help='number of training set')
    parser.add_argument('--valid_number', type=float,  default=0.01, metavar='NVa',
                        help='number of valid set')
    parser.add_argument('--test_number', type=float,  default=0.98, metavar='NTe',
                        help='number of test set')
    parser.add_argument('--patchsize', type=int,  default=9, metavar='P',
                        help='patchsize of data')
    parser.add_argument('--modelname', type=str,  default='SimpledanfengViT', metavar='P', help='which model to choose')
    parser.add_argument('--gpu_ids', type=int,  default=-1, metavar='G',
                        help='which gpu to use')
    parser.add_argument('--gpu_num', type=int,  default=1, metavar='G',
                        help='how many gpus to use')
    parser.add_argument('--depth', type=int,  default=5, metavar='G',
                        help='depth of NViT')
    parser.add_argument('--loadbestmodel', type=int,  default=1, metavar='L',
                        help='whether load model')
    args = parser.parse_args()

    dataset = args.dataset
    trialnumber = args.trial_number
    NTr = args.train_number
    NVa = args.valid_number
    NTe = args.test_number
    patchsize = args.patchsize
    modelname = args.modelname
    gpu_num = args.gpu_num
    depth = args.depth
    load_bestmodel = args.loadbestmodel
    gpu_ids = args.gpu_ids
    print('训练比例为 训练集{} 验证集{} 测试集{}'.format(NTr, NVa, NTe))
    print('模型为{}'.format(modelname))

    # Derive the output/result/image/data directories for this trial.
    resultpath, imagepath, datapath = setpath(dataset, trialnumber, NTr,
                                              NVa, NTe, modelname)

    # Load the hyperspectral image cube and its ground-truth map.
    IMAGE, GND = get_imggnd(dataset)

    # Best-effort: if a previous run left a result.pkl, recompute and print
    # its metrics. Any failure (missing file, stale format) is only reported.
    try:
        with open(resultpath + 'result.pkl', 'rb') as f:
            result = pickle.load(f)

        D = DataResult()
        D.y_score = result.y_score
        D.y_true = result.y_true
        D.get_metric()
        print(D.accuracy_score, D.auc, D.precision, D.recall)
    except Exception as e:
        print(e)

    # GPU selection: explicit id pins CUDA_VISIBLE_DEVICES; otherwise keep
    # retrying autoGPU until it manages to grab gpu_num free devices.
    if gpu_ids != -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ids)
    else:
        while True:
            try:
                autoGPU(gpu_num, 'auto')
                break
            except Exception as e:
                print(e)

    # Model registry. Values are mixed on purpose: most entries are classes
    # (instantiated below with default arguments), a few are pre-built
    # instances that need non-default constructor arguments.
    model = {
            'SAE': SAE,
            'PURE1DCNN': PURE1DCNN,
            'PURE2DCNN': PURE2DCNN,
            'PURE3DCNN': PURE3DCNN,
            'DBDA': DBDA_network,
            '1DCNN': _1DCNN,
            'CDCNN': CDCNN,
            'SSRN': SSRN_network,

            'NDIS_MODEL': NViT,
            'NNDIS_MODEL': NNViT,
            'danfengViT': hongViT,
            'SimpledanfengViT': hongViT(mode='ViT'),
            'NViTBaseline': NViT(num_classes=2, depth=depth)}

    model = model[modelname]
    if isinstance(model, type):
        model = model()

    if load_bestmodel:
        # map_location='cpu': the checkpoint may have been saved from a GPU
        # that is not visible here; load_state_dict copies the tensors onto
        # the model's device regardless, and .to('cuda') follows below.
        t_para = torch.load(resultpath + 'bestmodel.pth', map_location='cpu')
        try:
            model.load_state_dict(t_para)
        except RuntimeError:
            # Checkpoint was saved from nn.DataParallel: strip the 'module.'
            # prefix from every key and retry.
            model.load_state_dict({k.replace('module.', ''): v for k, v in t_para.items()})
        print('读了最佳模型, 继续训练')

    # Cut the image into patchsize x patchsize patches for the loaders.
    processeddata = DataPreProcess(IMAGE, patchsize, datapath, 1).processeddata

    print('patch 的尺寸为')
    print(processeddata['train'].patch.shape)

    T = TrainProcess(model=model.to('cuda'),
                    modelname=modelname+str(depth),
                    processeddata=processeddata,
                    train_config='./config_normal.yaml',
                    )

    T.evaluate(T.test_loader, T.test_result)
    print(T.test_result.accuracy_score, T.test_result.auc, T.test_result.precision, T.test_result.recall)
