import json

import matplotlib.pyplot as plt
import torch
import cv2
import sys
import argparse
import os
from PIL import Image
import numpy as np
import glob
import time
import shutil
from tqdm import tqdm
import skimage
from network import convnext_small,convnext_base, convnext_tiny,convnext_large,convnext_xlarge
from sklearn.metrics import roc_curve, auc
import torchvision


def Trans(degree, ww, lw):
    """Build the image-side training transform.

    Bicubic resize to `lw`, random rotation within +/-`degree`, mild color
    jitter, center crop to `ww`, then conversion to a [0, 1] tensor.
    """
    bicubic = torchvision.transforms.InterpolationMode.BICUBIC
    steps = [
        torchvision.transforms.Resize(lw, interpolation=bicubic),
        torchvision.transforms.RandomRotation((-degree, degree), interpolation=bicubic),
        torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ]
    return torchvision.transforms.Compose(steps)


def TransR(degree, ww, lw):
    """Build the mask/label-side training transform.

    Nearest-neighbour resize to `lw` (preserves discrete label values),
    center crop to `ww`, then conversion to tensor. `degree` is currently
    unused: rotation and color jitter are disabled for masks.
    """
    nearest = torchvision.transforms.InterpolationMode.NEAREST
    steps = [
        torchvision.transforms.Resize(lw, interpolation=nearest),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ]
    return torchvision.transforms.Compose(steps)


def vali_trans(ww, lw):
    """Validation transform for images: deterministic bicubic resize to
    `lw`, center crop to `ww`, tensor conversion (no augmentation)."""
    bicubic = torchvision.transforms.InterpolationMode.BICUBIC
    return torchvision.transforms.Compose([
        torchvision.transforms.Resize(lw, interpolation=bicubic),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ])


def vali_transR(ww, lw):
    """Validation transform for masks: deterministic nearest-neighbour
    resize to `lw` (keeps label values intact), center crop to `ww`,
    tensor conversion."""
    nearest = torchvision.transforms.InterpolationMode.NEAREST
    return torchvision.transforms.Compose([
        torchvision.transforms.Resize(lw, interpolation=nearest),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ])


def time_sync():
    """Return a wall-clock timestamp that is accurate for CUDA timing.

    Waits for all pending GPU kernels to finish before reading the clock,
    so elapsed-time measurements include asynchronous GPU work.
    """
    cuda_present = torch.cuda.is_available()
    if cuda_present:
        torch.cuda.synchronize()
    return time.time()

def mean_std(data):
    """Normalize a stacked-slice array in place with ImageNet statistics.

    Slices are assumed to repeat in RGB order along axis 0, so slice `i`
    is normalized with the statistics of channel `i % 3`. The input array
    is modified in place and also returned.
    """
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    for idx in range(data.shape[0]):
        ch = idx % 3  # replaces the original if/elif/else on i % 3
        data[idx, :, :] = (data[idx, :, :] - means[ch]) / stds[ch]
    return data

def getdata(size, data, lw, ww, degree):
    """Load one volume from an .npz record, transform each slice, and
    resample the stack to the target volume shape.

    Args:
        size: (layers, width, height) target volume shape.
        data: npz-style mapping with key 'image' holding a
            (slices, H, W) array; slices must be PIL-convertible
            (presumably uint8 -- TODO confirm against the data pipeline).
        lw: per-slice resize target passed to the validation transform.
        ww: unused -- the crop size is taken from size[1] instead.
            NOTE(review): kept for interface compatibility; confirm
            whether the original `vali_trans(ww=width, ...)` was intended.
        degree: unused at validation time (no rotation applied); kept for
            interface compatibility.

    Returns:
        float32 numpy array of shape `size`, with values scaled to [0, 1].
    """
    layers, width, height = size
    # NOTE(review): the `ww` argument is ignored here, as in the original;
    # the center crop uses size[1].
    transform = vali_trans(ww=width, lw=lw)

    image = data['image']

    # Transform every slice, collecting into a list and concatenating once.
    # The original called torch.cat inside the loop, which is quadratic in
    # the number of slices.
    slices = [transform(Image.fromarray(image[i, :, :]))
              for i in range(image.shape[0])]
    volume = torch.cat(slices, dim=0)

    # Back to a 0-255 numpy array, then linearly resample to the target
    # (layers, width, height) grid.
    volume = volume.numpy() * 255
    volume = skimage.transform.resize(volume, (layers, width, height),
                                      anti_aliasing=False, order=1)

    # Truncate to uint8 (as the original did) before rescaling to [0, 1].
    volume = volume.astype(np.uint8)
    volume = (volume / 255.).astype(np.float32)
    return volume
@torch.no_grad()
def run(
        model_name='convnext',  # network name
        weights='/home/hzt/whh/pycharm_project_179/runs/Apr25_20-59-42_ubuntu-SYS-7049GP-TRT/weights/best_model.pth',  # model path
        source='/home/hzt/whh/pycharm_project_179/val.txt',  # txt file listing one sample directory per line
        use_cuda=True,  # use CUDA when available
        view_img=False,  # visualize test images (currently unused)
        save_txt=True,  # create the output directory and result file
        project='runs/result',  # output path
):
    """Evaluate a ConvNeXt classifier on multi-phase lesion volumes.

    Reads sample directories from `source` (one per line), loads the three
    contrast phases (a/p/v .npz) for each sample, runs the model, then
    plots one-vs-rest ROC curves for 5 classes and prints overall accuracy.
    """
    device = torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")

    # Open the result file only when requested. The original called
    # f.close() unconditionally, raising NameError when save_txt=False.
    f = None
    if save_txt:
        if os.path.exists(project):
            shutil.rmtree(project)
        os.makedirs(project)
        f = open(os.path.join(project, "result.txt"), 'w')

    def get_label(tmp_subfolder):
        """Map a sample path to its class index by substring matching.

        Dict insertion order matches the original if/elif chain, so match
        priority is preserved. Returns None when no known name matches.
        """
        label_dic = {'HCC': 3, 'CYST': 0, 'FNH': 1, 'HA': 2, 'ICC': 4,
                     'META': 5, 'Hemangioma': 2, 'nodule': 0}
        for name, label in label_dic.items():
            if name in tmp_subfolder:
                return label
        return None

    # Load model.
    assert os.path.exists(weights), "model path: {} does not exists".format(weights)
    if model_name == 'convnext':
        model = convnext_tiny(num_classes=5)
    else:
        model = None

    model.load_state_dict(torch.load(weights, map_location=device), strict=False)
    model.eval().to(device)

    # Warm-up forward pass so lazy CUDA initialization does not skew timing.
    model(torch.rand(1, 3, 48, 64, 64).to(device))

    # `source` is read below as a listing file; the original also built an
    # unused `files` glob here, which has been removed.
    assert os.path.exists(source), "data source: {} does not exists".format(source)
    with open(source, 'r') as file:
        images_path = [line.rstrip('\n') for line in file]

    # TODO(review): debug truncation left by the original author -- only
    # the first 10 samples are evaluated. Remove to evaluate the full set.
    images_path = images_path[:10]

    images_label = [get_label(p) for p in images_path]

    size = (48, 64, 64)  # (layers, width, height) expected by the network
    lw = 64              # per-slice resize target
    ww = 64              # crop size (currently superseded inside getdata)
    degree = 45          # rotation degree (unused at validation time)

    pred_class = []
    prob = []
    for img_path in images_path:
        # Three contrast phases per sample: a.npz / p.npz / v.npz.
        phases = [np.load(os.path.join(img_path, fname))
                  for fname in ('a.npz', 'p.npz', 'v.npz')]
        volumes = [torch.tensor(getdata(size, d, lw, ww, degree))
                   for d in phases]

        # Stack phases as channels and add a batch dim:
        # (1, 3, layers, width, height).
        img_tensor = torch.unsqueeze(torch.stack(volumes), dim=0)

        pred = model(img_tensor.to(device))
        pred_class.append(torch.argmax(pred).cpu().numpy().item())
        prob.append(torch.softmax(pred, dim=1).cpu().numpy())

    prob = np.array(prob).squeeze()
    gt = np.array(images_label)

    # One-vs-rest ROC / AUC for each of the 5 model outputs.
    fpr, tpr, roc_auc = [], [], []
    for i in range(5):
        tmp_gt = (gt == i).astype(int)
        fpr_i, tpr_i, _ = roc_curve(tmp_gt, prob[:, i])
        fpr.append(fpr_i)
        tpr.append(tpr_i)
        roc_auc.append(auc(fpr_i, tpr_i))

    # Index -> display name, per the label mapping above.
    # NOTE(review): META (label 5) is never plotted since the model has
    # only 5 outputs; samples labeled 5 count as negatives in every curve.
    class_names = {0: "CYST or nodule", 1: "FNH", 2: "HA or Hemangioma",
                   3: "HCC", 4: "ICC", 5: "META"}
    plt.figure()
    for i in range(5):
        plt.plot(fpr[i], tpr[i], lw=2,
                 label=f'ROC curve {class_names[i]} (area = %0.4f)' % roc_auc[i])
    # Diagonal reference line and axis decoration drawn once; the original
    # redundantly redrew them on every iteration of the loop above.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ConvNeXt-Tiny (ROC)')
    plt.legend(loc="lower right")
    plt.show()

    # Overall accuracy over the evaluated samples.
    pred_class = np.array(pred_class)
    acc = np.sum(pred_class == gt) / len(images_label)
    print("Accuracy:", acc)

    if f is not None:
        f.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, default='convnext')
    parser.add_argument('--weights', type=str, default='/home/hzt/whh/pycharm_project_179/runs/Apr25_20-59-42_ubuntu-SYS-7049GP-TRT/weights/best_model.pth', help='the model path')
    parser.add_argument('--source', type=str, default='/home/hzt/whh/pycharm_project_179/val.txt', help='test data path')
    parser.add_argument('--use-cuda', type=bool, default=True)
    parser.add_argument('--view-img', type=bool, default=False)
    parser.add_argument('-s', '--save-txt', type=bool, default=True)
    parser.add_argument('--project', type=str, default='runs/result', help='output path')
    #parser.add_argument('--class-indices', type=str, default='class_indices.json',
    #                   help='when train,the file will generate')
    opt = parser.parse_args()
    run(**vars(opt))
