import argparse
import os
import pickle
import warnings
import xml.etree.ElementTree as ET

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import spectral.io.envi as envi
import torch
from spectral import imshow, view_cube
# from ViT import ViT
from vit_pytorch import ViT

from AutoGPU import autoGPU
from GAN_training_utils import TrainProcess, setup_seed
from models import (_1DCNN, _2DCNN, _3DCNN, _3DCNN_1DCNN, _3DCNN_AM, PURE3DCNN,
                    PURE3DCNN_2AM, SAE, SAE_AM, DBDA_network, HamidaEtAl,
                    LeeEtAl, SSRN_network, myknn, mysvm)
from myTrans2 import Generator
from NViT import myViT
from utils import DataPreProcess, MyDataset, myplot, plot, setpath, splitdata

# Build the ViT-based classifier (myViT from NViT) and move it to the GPU.
# NOTE(review): this block runs BEFORE the CLI is parsed and before
# CUDA_VISIBLE_DEVICES / autoGPU are configured further down, so the
# --gpu_ids option cannot affect which device this model lands on —
# consider moving model construction after the GPU setup.
model = myViT(
                num_classes = 2,    # two output classes (presumably tumor vs. non-tumor — TODO confirm)
                dim = 10,           # token/embedding dimension
                depth = 6,          # number of transformer blocks
                heads = 16,         # attention heads per block
                mlp_dim = 2048,     # feed-forward hidden size
                dropout = 0.1,      # dropout inside the transformer
                emb_dropout = 0.1   # dropout on the input embeddings
                ).cuda()

# Restore the best checkpoint from a previous training run.
# NOTE(review): torch.load without map_location requires a CUDA device at
# load time; consider map_location=... (and weights_only=True) for portability.
model.load_state_dict(torch.load('best1.pth'))


# Fix all RNG seeds for reproducibility and silence library warnings.
setup_seed(1993)
warnings.filterwarnings("ignore")

# Command-line interface, table-driven: (flag, type, default, metavar, help).
_CLI_SPECS = [
    ('--dataset',      str, 'pathology', 'D',   'the dataset path you load'),
    ('--trial_number', int, 1,           'T',   'the time you do this trial'),
    ('--train_number', int, 1000,        'NTr', 'number of training set'),
    ('--valid_number', int, 1000,        'NVa', 'number of valid set'),
    ('--test_number',  int, 1000,        'NTe', 'number of test set'),
    ('--patchsize',    int, 9,           'P',   'patchsize of data'),
    ('--modelname',    str, 'MyViT',     'P',   'which model to choose'),
    ('--gpu_ids',      int, -1,          'G',   'which gpu to use'),
]
parser = argparse.ArgumentParser()
for flag, arg_type, default, metavar, help_text in _CLI_SPECS:
    parser.add_argument(flag, type=arg_type, default=default,
                        metavar=metavar, help=help_text)


# Make CUDA device numbering match the PCI bus order before selecting a GPU.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
args = parser.parse_args()

# GPU selection: -1 means "pick one automatically", otherwise pin the id.
gpu_ids = args.gpu_ids
if gpu_ids == -1:
    autoGPU(1, 'auto')
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_ids)

# Unpack the parsed arguments into the short names used below.
dataset = args.dataset
NTr = args.train_number
trialnumber = args.trial_number
NTe = args.test_number
NVa = args.valid_number
patchsize = args.patchsize
modelname = args.modelname



# Load the hyperspectral cube (ENVI header + raw) for this region of interest.
dark_ref = envi.open('./Cholangiocarcinoma/032310-20x-roi3.hdr','./Cholangiocarcinoma/032310-20x-roi3.raw')
dark_nparr = np.array(dark_ref.load())

# The annotation polygons live in a PASCAL-VOC-style XML file.
tree = ET.parse('./Cholangiocarcinoma/032310-20x-roi3.xml')
polygon = tree.getroot().findall('object/polygon')

# Rasterize every annotated polygon into a binary mask (1 = annotated tissue).
mask = np.zeros((dark_nparr.shape[0], dark_nparr.shape[1]), dtype="uint8")
for poly in polygon:
    xs = [int(node.text) for node in poly.findall('pt/x')]
    ys = [int(node.text) for node in poly.findall('pt/y')]
    # One (N, 2) array of [x, y] vertices for this polygon.
    cor_xy = np.column_stack((xs, ys))
    contour = np.int32([cor_xy])
    cv2.polylines(mask, contour, 1, 1)   # draw the outline
    cv2.fillPoly(mask, contour, 1)       # fill the interior



# Resolve the output locations for this trial configuration.
resultpath, imagepath, datapath = setpath(dataset, trialnumber, NTr, NVa, NTe, modelname)

# Flip the cube vertically (presumably to align it with the XML mask's
# coordinate origin — TODO confirm) and split into train/valid/test sets.
IMAGE = np.flipud(dark_nparr)
GND = mask
spliteddata = splitdata(IMAGE, GND, datapath, trainnum=NTr, validnum=NVa, testnum=NTe)

processeddata = DataPreProcess(IMAGE, patchsize, datapath, 1).processeddata

# The validation split may be absent; resolve it once instead of twice.
valid_split = processeddata['valid']
if valid_split is None:
    valid_patch = None
    valid_gt = None
else:
    valid_patch = valid_split.patch.transpose(0, 3, 2, 1)
    valid_gt = valid_split.gt

# Patches are transposed from (N, H, W, C) ordering to (N, C, W, H) for the
# network input — TODO confirm axis semantics against DataPreProcess.
data_mix = {
    'train_patch': processeddata['train'].patch.transpose(0, 3, 2, 1),
    'train_gt': processeddata['train'].gt,
    'test_patch': processeddata['test'].patch.transpose(0, 3, 2, 1),
    'test_gt': processeddata['test'].gt,
    'valid_patch': valid_patch,
    'valid_gt': valid_gt,
}

# Deterministic (unshuffled) loader over the test patches.
test_dataset = MyDataset(data_mix['test_gt'], data_mix['test_patch'])
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=128,
    shuffle=False,
)

from utils import DataResult

# Evaluate the restored model on the test split and print the overall
# accuracy (trace of the confusion matrix divided by its sum).
data_result = DataResult()
model.eval()  # hoisted out of the batch loop: mode only needs to be set once
with torch.no_grad():
    for batch_idx, data in enumerate(test_loader):
        # Move every tensor of the batch to the GPU in place.
        for idx, item in enumerate(data):
            data[idx] = item.to('cuda')
        images, labels = data
        # Forward pass; the model returns a pair whose second element is the
        # class logits (first element unused here — TODO confirm its meaning).
        _, outputs = model(images.float())

        # Predicted class = argmax over the logits dimension.
        # (No .data needed: we are already inside torch.no_grad().)
        _, predicted = torch.max(outputs, 1)

        # Accumulate predictions and ground truth for the confusion matrix.
        data_result.y_pre += list(predicted.cpu().numpy())
        data_result.y_true += list(labels.cpu().numpy())
    data_result.get_confmat()
    # Overall accuracy: correctly classified samples / total samples.
    print(data_result.conf_mat.trace() / data_result.conf_mat.sum())
