import json

import matplotlib.pyplot as plt
import torch
import cv2
import sys
import argparse
import os
from PIL import Image
import numpy as np
import glob
import time
import shutil
from tqdm import tqdm
import skimage
from network2 import convnext_small,convnext_base, convnext_tiny,convnext_large,convnext_xlarge
from sklearn.metrics import roc_curve, auc, confusion_matrix, precision_score, recall_score, f1_score
import torchvision
from swintransformer import SwinTransformer3D


def vali_trans(ww, lw):
    """Validation image pipeline: bicubic resize to ``lw``, center-crop to
    ``ww``, then convert the PIL image to a tensor."""
    steps = [
        torchvision.transforms.Resize(
            lw, interpolation=torchvision.transforms.InterpolationMode.BICUBIC),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ]
    return torchvision.transforms.Compose(steps)


def vali_transR(ww, lw):
    """Validation mask pipeline: nearest-neighbour resize to ``lw`` (keeps
    masks binary), center-crop to ``ww``, then convert to a tensor."""
    steps = [
        torchvision.transforms.Resize(
            lw, interpolation=torchvision.transforms.InterpolationMode.NEAREST),
        torchvision.transforms.CenterCrop(ww),
        torchvision.transforms.ToTensor(),
    ]
    return torchvision.transforms.Compose(steps)


def time_sync():
    """Return the wall-clock time, first flushing pending CUDA kernels so
    GPU work does not skew timing measurements."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.synchronize()
    return time.time()

def getdata_trans(size, lw, ww, data, seed, transform):
    """Crop the tumour region out of a 3-D volume, apply the validation
    transforms slice-by-slice, and resize the crop to ``size``.

    Args:
        size: (layer, width, height) output shape of the resized tumour crop.
        lw: resize length for the validation transforms.
        ww: center-crop size for the validation transforms.
        data: mapping with 'image' and 'mask' 3-D arrays indexed (z, x, y);
            assumes image slices are PIL-compatible 2-D arrays — TODO confirm dtype.
        seed: RNG seed re-applied before every per-slice transform so image
            and mask receive identical (deterministic) behaviour.
        transform: must be falsy; training-time transforms are not supported.

    Returns:
        np.ndarray of shape ``size`` holding the resized tumour crop
        (float values on a 0-255 scale).

    Raises:
        ValueError: if ``transform`` is truthy or the mask has no positive voxels.
    """
    image = data['image']
    mask = data['mask']
    layer = size[0]
    width = size[1]
    height = size[2]
    if transform:
        # BUG FIX: the original printed "WRONG" and then crashed with a
        # NameError on undefined transforms; fail loudly and clearly instead.
        raise ValueError("getdata_trans only supports transform=False")
    transformI = vali_trans(ww, lw)
    transformR = vali_transR(ww, lw)

    # Binarise the mask and scale to uint8 so it round-trips through PIL.
    mask = mask > 0.1
    mask = (mask * 255).astype(np.uint8)
    # Indices of slices that contain any foreground.
    index = np.sum(np.sum(mask, -1), -1)
    index = np.where(index > 0)[0]
    if len(index) == 0:
        # BUG FIX: the original printed a garbage message containing the
        # *builtin* ``id`` and then crashed in np.min below with the same
        # exception type; raise the explicit error here instead.
        raise ValueError("getdata_trans received an empty mask")
    if len(index) < 4:
        # Very thin masks (<4 foreground slices): replace the mask with a
        # padded bounding box so the crop below has enough spatial context.
        z_, x_, y_ = np.where(mask > 0)
        start_z = max(0, min(z_) - 2)
        end_z = min(max(z_) + 2, mask.shape[0])
        start_x = max(0, min(x_) - 5)
        end_x = min(max(x_) + 5, mask.shape[1])
        start_y = max(0, min(y_) - 5)
        end_y = min(max(y_) + 5, mask.shape[2])

        new_mask = np.zeros(mask.shape).astype(np.uint8)
        new_mask[start_z:end_z, start_x:end_x, start_y:end_y] = 255
        mask = new_mask

    # Transform each slice with an identical seed so image and mask stay
    # spatially aligned, stacking slices along dim 0.
    torch.manual_seed(seed)
    transformed_data = transformI(Image.fromarray(image[0, :, :]))
    torch.manual_seed(seed)
    transformed_mask = transformR(Image.fromarray(mask[0, :, :]))

    for i in range(image.shape[0] - 1):
        torch.manual_seed(seed)
        trans_data = transformI(Image.fromarray(image[i + 1, :, :]))
        torch.manual_seed(seed)
        trans_mask = transformR(Image.fromarray(mask[i + 1, :, :]))
        transformed_data = torch.cat([transformed_data, trans_data], dim=0)
        transformed_mask = torch.cat([transformed_mask, trans_mask], dim=0)

    # ToTensor scaled values to [0, 1]; restore the 0-255 range.
    transformed_data = (transformed_data.numpy() * 255)
    transformed_mask = transformed_mask.numpy()

    # Tight bounding box of the transformed mask, then resize the tumour
    # crop to the requested output shape (bilinear, order=1).
    z, x, y = np.where(transformed_mask > 0)
    z_start = np.min(z)
    x_start = np.min(x)
    y_start = np.min(y)
    z_end = np.max(z)
    x_end = np.max(x)
    y_end = np.max(y)
    tumor = transformed_data[z_start:z_end+1, x_start:x_end+1, y_start:y_end+1]
    transformed_tumor = skimage.transform.resize(tumor, output_shape=(layer, width, height), anti_aliasing=None, order=1)

    return transformed_tumor


def getdata(size, data):
    """Crop the mask bounding box out of ``data['image']``, resize it to
    ``size`` (bilinear), and rescale intensities via (x - 0.5) / 0.5."""
    volume = data['image']
    roi = data['mask']
    depth, rows, cols = size[0], size[1], size[2]

    # Tight bounding box of the positive mask voxels.
    zz, xx, yy = np.where(roi > 0)
    z_lo, z_hi = np.min(zz), np.max(zz)
    x_lo, x_hi = np.min(xx), np.max(xx)
    y_lo, y_hi = np.min(yy), np.max(yy)

    crop = volume[z_lo:z_hi + 1, x_lo:x_hi + 1, y_lo:y_hi + 1]
    resized = skimage.transform.resize(
        crop, output_shape=(depth, rows, cols), anti_aliasing=None, order=1)
    # Shift/scale: presumably maps [0, 1] intensities to [-1, 1] — TODO confirm input range.
    return (resized - 0.5) / 0.5


@torch.no_grad()
def run(
        model_name='convnext',  # network name
        weights='/home/hzt/whh/pycharm_project_179/runs/May09_15-29-15_ubuntu-SYS-7049GP-TRT/weights/best_model.pth',  # model path
        source='/home/hzt/whh/pycharm_project_179/runs/Apr03_17-31-30_ubuntu-SYS-7049GP-TRT/val.txt',  # test data path: a txt file listing case directories
        use_cuda=True,  # whether to use cuda
        view_img=False,  # whether to visualise test images (currently unused)
        save_txt=True,  # whether to create a result txt file under ``project``
        project='runs/result',  # output path
):
    """Evaluate a trained classifier on the cases listed in ``source``.

    Loads the model weights, runs inference on the three contrast phases
    (a/p/v .npz volumes) of every case directory listed in ``source``,
    plots per-class one-vs-rest ROC curves, and prints accuracy,
    confusion matrix, precision, recall and F1 (macro and weighted).

    Raises:
        AssertionError: if ``weights`` or ``source`` does not exist.
        ValueError: if ``model_name`` is not supported.
    """
    device = torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")

    result_file = None
    if save_txt:
        # Start from a clean output directory.
        if os.path.exists(project):
            shutil.rmtree(project)
        os.makedirs(project)
        # NOTE(review): nothing is ever written to this file; it is kept
        # (empty) for backward compatibility with the original behaviour.
        result_file = open(project + "/result.txt", 'w')

    def get_label(tmp_subfolder):
        # Map a case path to its integer class label by keyword, first
        # match wins; returns None when no keyword is present.
        label_dic = {'HCC': 3, 'CYST': 0, 'FNH': 1, 'HA': 2, 'ICC': 4, 'META': 5, 'Hemangioma': 2, 'nodule': 0}
        for key in ('HCC', 'CYST', 'FNH', 'HA', 'ICC', 'META', 'Hemangioma', 'nodule'):
            if key in tmp_subfolder:
                return label_dic[key]
        return None

    try:
        # Load model.
        assert os.path.exists(weights), "model path: {} does not exists".format(weights)
        if model_name == 'convnext':
            model = convnext_base(num_classes=6)
        else:
            # Fail with a clear message instead of crashing later on
            # ``None.load_state_dict``.
            raise ValueError("unsupported model name: {}".format(model_name))

        model.load_state_dict(torch.load(weights, map_location=device), strict=False)
        model.eval()
        model.to(device)

        # Warm-up forward pass.
        model(torch.rand(1, 3, 64, 64, 64).to(device))

        assert os.path.exists(source), "data source: {} does not exists".format(source)

        # ``source`` is a txt file with one case directory per line.
        with open(source, 'r') as file:
            images_path = [line.rstrip('\n') for line in file]

        images_label = [get_label(p) for p in images_path]

        # Preprocessing hyper-parameters (tuned).
        size = (64, 64, 64)
        lw = 448
        ww = 384
        seed = np.random.randint(0, 2 ** 16)

        pred_class = []
        prob = []
        for img_path in images_path:
            # Three contrast-phase volumes per case — presumably
            # arterial / portal / venous; TODO confirm naming convention.
            data_a = np.load(os.path.join(img_path, 'a.npz'))
            data_p = np.load(os.path.join(img_path, 'p.npz'))
            data_v = np.load(os.path.join(img_path, 'v.npz'))
            tumor_a = getdata_trans(size=size, data=data_a, lw=lw, ww=ww, seed=seed, transform=False).astype(np.float32)
            tumor_p = getdata_trans(size=size, data=data_p, lw=lw, ww=ww, seed=seed, transform=False).astype(np.float32)
            tumor_v = getdata_trans(size=size, data=data_v, lw=lw, ww=ww, seed=seed, transform=False).astype(np.float32)

            # Stack the phases as channels: shape (1, 3, D, H, W).
            tumor_set = [torch.tensor(tumor_a), torch.tensor(tumor_p), torch.tensor(tumor_v)]
            img_tensor = torch.unsqueeze(torch.stack(tumor_set), dim=0)

            pred = model(img_tensor.to(device))
            pred_class.append(torch.argmax(pred).cpu().numpy().item())
            prob.append(torch.softmax(pred, dim=1).cpu().numpy())

        prob = np.array(prob).squeeze()
        gt = np.array(images_label)

        # Per-class one-vs-rest ROC curves and AUCs.
        fpr = []
        tpr = []
        roc_auc = []
        for i in range(6):
            tmp_prob = prob[:, i]
            tmp_gt = (gt == i).astype(int)
            fpr_i, tpr_i, _ = roc_curve(tmp_gt, tmp_prob)
            fpr.append(fpr_i)
            tpr.append(tpr_i)
            roc_auc.append(auc(fpr_i, tpr_i))

        # Index -> display name; 'HA' and 'Hemangioma' share class 2.
        class_names = {0: "CYST", 1: "FNH", 2: "Hemangioma", 3: "HCC", 4: "ICC", 5: "META"}
        plt.figure()
        for i in range(6):
            plt.plot(fpr[i], tpr[i], lw=2, label=f'ROC curve {class_names[i]} (area = %0.4f)' % roc_auc[i])
        plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Convnext-B (ROC)')
        plt.legend(loc="lower right")
        plt.show()

        # Accuracy and per-class metrics.
        pred_class = np.array(pred_class)
        acc = np.sum(pred_class == gt) / len(images_label)
        print("Accuracy:", acc)

        cm = confusion_matrix(gt, pred_class)
        print(cm)

        precision = precision_score(gt, pred_class, average='macro')
        print("precision = ", precision)

        rs = recall_score(gt, pred_class, average='macro')
        print("recall score = ", rs)

        f1 = f1_score(gt, pred_class, average='macro')
        print("f1 score = ", f1)

        a_p = precision_score(gt, pred_class, average='weighted')
        print("avarage precision score = ", a_p)

        a_rs = recall_score(gt, pred_class, average='weighted')
        print("average recall score = ", a_rs)

        a_f1 = f1_score(gt, pred_class, average='weighted')
        print("average f1 score = ", a_f1)
    finally:
        # BUG FIX: the original called f.close() unconditionally, raising
        # NameError when save_txt=False; close only what was opened.
        if result_file is not None:
            result_file.close()


if __name__ == '__main__':
    def _str2bool(value):
        # BUG FIX: argparse ``type=bool`` treats any non-empty string —
        # including "False" — as True; parse common spellings explicitly.
        return str(value).lower() in ('1', 'true', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, default='convnext')
    parser.add_argument('--weights', type=str, default='/home/hzt/whh/pycharm_project_179/runs/May09_15-29-15_ubuntu-SYS-7049GP-TRT/weights/best_model.pth', help='the model path')
    parser.add_argument('--source', type=str, default='/home/hzt/whh/pycharm_project_179/runs/Apr03_17-31-30_ubuntu-SYS-7049GP-TRT/val.txt', help='test data path')
    parser.add_argument('--use-cuda', type=_str2bool, default=True)
    parser.add_argument('--view-img', type=_str2bool, default=False)
    parser.add_argument('-s', '--save-txt', type=_str2bool, default=True)
    parser.add_argument('--project', type=str, default='runs/result', help='output path')
    opt = parser.parse_args()
    run(**vars(opt))
