import os
import torch
import random
import argparse
import numpy as np
import SimpleITK as sitk
import torch.nn.functional as F
from PredictTCL_main.contrastive_disentangle_fusion5 import ContrastiveDisFusion


def _str2bool(value):
    """Parse a command-line boolean robustly.

    FIX: argparse's ``type=bool`` is a trap — ``bool('False')`` is ``True``
    because every non-empty string is truthy, so ``--flag False`` used to
    enable the flag. Map the usual spellings explicitly instead.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got {!r}'.format(value))


# Command-line configuration for training/testing the fusion classifier.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=666)
parser.add_argument('--method_name', type=str, default='con_DisFusion') # att_hardDisFusion, earlyFusion, middleFusion, middleFusion2, att_hardDisFusion2, con_DisFusion, embraceFusion, maml, nestedFormer, misa
parser.add_argument('--dataset_name', type=str, default='men') # brats, men, brats2, BRCA, ABIDE, mrnet-acl, mrnet-meniscus, nestedFormer
parser.add_argument('--run_type', type=str, default='test') # train, test, vis, heatmap, vis_space
parser.add_argument('--heatmap_type', type=str, default='cs') # pcc, cs (cosine similarity)
parser.add_argument('--multi_gpus', type=_str2bool, default=False)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--pretrained', type=int, default=0)
parser.add_argument('--ft_rate', type=float, default=0.01)
parser.add_argument('--model_depth', type=int, default=18)
parser.add_argument('--fold_train', type=int, default=123)
parser.add_argument('--train_lr_men', type=float, default=0.0001)
parser.add_argument('--train_lr_abide', type=float, default=0.0001)
parser.add_argument('--train_lr_brats', type=float, default=0.00001)
parser.add_argument('--train_lr_mrnet', type=float, default=0.00001) # proposed 0.0005
parser.add_argument('--lr_gamma', type=float, default=1)
parser.add_argument('--cls_drop', type=float, default=0.5)
parser.add_argument('--encoder_drop', type=float, default=0.5)
parser.add_argument('--map_drop', type=float, default=0.5)
parser.add_argument('--l2_value', type=float, default=0.0001)
parser.add_argument('--loss_type', type=str, default='ce') # ce, focal
parser.add_argument('--focal_alpha', type=float, default=0.25)
parser.add_argument('--focal_gamma', type=float, default=2)
parser.add_argument('--mixup', type=_str2bool, default=False)
parser.add_argument('--encoder_share', type=_str2bool, default=False)
parser.add_argument('--testTraindata', type=_str2bool, default=False) # test_traindata
parser.add_argument('--train_epochs', type=int, default=100)
parser.add_argument('--train_epochs_BRCA', type=int, default=1000)
parser.add_argument('--train_epochs_ABIDE', type=int, default=800)
parser.add_argument('--lambda_epochs', type=int, default=30)
parser.add_argument('--dif_weight_type', type=str, default=None)
parser.add_argument('--en_disnum', type=int, default=5)
parser.add_argument('--fusion_type', type=str, default='cma') # concat, sa, fc_sa, conv_sa, cma, tsa (self attention with transformer)
parser.add_argument('--cma_type', type=str, default='cascade_comp_inf') # ab (ablation: concat the three k's, then attention against q/v), multi_q+map, comp_inf, cascade_comp_inf
parser.add_argument('--attention_type', type=str, default='sigmoid') # sigmoid
parser.add_argument('--server', type=str, default='self') # self, wenwu
parser.add_argument('--test_multi', type=_str2bool, default=False) # whether the checkpoint loaded at test time was trained on multiple GPUs
parser.add_argument('--ifl1', type=_str2bool, default=False)
parser.add_argument('--ifdisen', type=int, default=1)
parser.add_argument('--if_offline_data_aug', type=int, default=1)
parser.add_argument('--ifdis_sup', type=int, default=1)
parser.add_argument('--ifconloss', type=str, default='T')
parser.add_argument('--ifsa_qkv', type=str, default='T')
parser.add_argument('--ifinter_sa', type=str, default='F')
# parser.add_argument('--ifresidual_v', type=str, default='T')
# parser.add_argument('--ifsa_addrelu', type=str, default='T')
parser.add_argument('--ifaux_spec', type=str, default='F')
parser.add_argument('--ifaux_inter', type=str, default='F')
parser.add_argument('--ifrecon', type=str, default='T')
parser.add_argument('--ifcross_recon', type=str, default='F')
parser.add_argument('--ifsuploss', type=int, default=1)
# parser.add_argument('--add_nonlinear', type=str, default='F')
parser.add_argument('--ifse_agg', type=str, default='F')
parser.add_argument('--cascade_order', type=str, default='order') # order, reverse
parser.add_argument('--w_main_cls', type=float, default=1)
parser.add_argument('--w_spec', type=float, default=1)
parser.add_argument('--w_inter', type=float, default=1)
parser.add_argument('--w_recon', type=float, default=1)
parser.add_argument('--w_cross_recon', type=float, default=1)
parser.add_argument('--w_sup', type=float, default=1)
parser.add_argument('--w_con', type=float, default=1)
parser.add_argument('--out_channels', type=float, default=16)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--tau', type=float, default=0.07)
parser.add_argument('--lambda_maml', type=float, default=0)
parser.add_argument('--sim_type', type=str, default='l2norm') # cosine, MI, l2norm
parser.add_argument('--sup_loss_type', type=str, default='l2norm') # orthogonality, l2norm
parser.add_argument('--inter_fusion_type', type=str, default='mean') # mean, concat
parser.add_argument('--inter_sup_fusion_type', type=str, default='concat') # concat, at (autoencoder)
parser.add_argument('--inter_inter_sup_fusion_type', type=str, default='concat')
parser.add_argument('--save_epoch_start', type=int, default=5)
parser.add_argument('--early', type=int, default=1000)
parser.add_argument('--step_size', type=int, default=100)
parser.add_argument('--ifwarmup', type=_str2bool, default=True)
parser.add_argument('--vl_hidden', type=int, default=128)
parser.add_argument('--n_head', type=int, default=2)
parser.add_argument('--vl_dropout', type=float, default=0.5)
parser.add_argument('--vl_nlayer', type=int, default=1)
parser.add_argument('--cma_vl_hidden', type=int, default=64)
parser.add_argument('--cma_n_head', type=int, default=1)
parser.add_argument('--cma_vl_dropout', type=float, default=0.5)
parser.add_argument('--uncertainty', type=str, default='F')
parser.add_argument('--annealing_step', type=int, default=30)
parser.add_argument('--online_aug_type', type=str, default='random_crop') # random crop, noise, flip


args = parser.parse_args()


# Device / distributed setup.
if args.multi_gpus:
    # Hard-coded GPU pair for DDP runs; note --gpu is ignored in this branch.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,2'
    # LOCAL_RANK is expected to be injected by the distributed launcher
    # (torchrun / torch.distributed.launch); raises KeyError if run directly.
    local_rank = int(os.environ["LOCAL_RANK"])

    if local_rank != -1:
        # Bind this process to its GPU *before* initializing the process
        # group, as required for the NCCL backend.
        torch.cuda.set_device(local_rank)
        device=torch.device("cuda", local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method='env://')
else:
    # Single-GPU mode: expose only the requested device to this process.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)


# Three-way classification; see test()/test_fold() for how the class index is
# mapped to the (inv_pred, men_pred) output pair.
num_classes = 3

# Seed every RNG source the pipeline touches so runs are reproducible.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)  # if you are using multi-GPU.
np.random.seed(args.seed)  # Numpy module.
random.seed(args.seed)  # Python random module.
# FIX: benchmark=True lets cuDNN auto-tune and nondeterministically select
# convolution algorithms, which defeats deterministic=True. PyTorch's
# reproducibility guidance is to disable benchmarking when determinism is
# required, so both flags now agree with the seeding above.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True


cuda = torch.cuda.is_available()


def Standardize(images):
    """Z-score normalize each channel of `images` over the foreground only.

    The foreground mask is taken as the voxels where the channel-wise sum is
    positive, so zero-valued background stays exactly zero after re-scaling.

    Args:
        images: array of shape (C, ...); a 3-D array gets a leading channel
            axis added first.

    Returns:
        A new float32 array of shape (C, ...). Unlike the original version,
        the caller's array is never modified.
    """
    if images.ndim == 3:
        images = np.expand_dims(images, axis=0)
    # FIX: work on a float32 copy. The original wrote normalized values back
    # into the input array, which both clobbered the caller's data and
    # silently truncated results when the input had an integer dtype.
    images = np.array(images, dtype='float32')
    mask_location = images.sum(0) > 0
    for k in range(images.shape[0]):
        foreground = images[k][mask_location]
        if foreground.size == 0:
            continue  # all-background input: nothing to normalize
        std = foreground.std()
        images[k][mask_location] -= foreground.mean()
        # FIX: guard a constant foreground (std == 0 previously produced
        # NaN/inf via division by zero).
        if std > 0:
            images[k][mask_location] /= std
    return images


def multi_model_Standard(image):
    """Standardize every modality of `image` independently, then restack.

    Args:
        image: array of shape (M, ...) with one modality per leading index.

    Returns:
        A contiguous array whose modalities are each z-scored by
        `Standardize` (which re-adds a leading channel axis per modality).
    """
    # FIX: collect all normalized modalities and concatenate once. The
    # original concatenated inside the loop, copying the accumulated result
    # on every iteration (quadratic in the number of modalities).
    normalized = [Standardize(image[i, ...]) for i in range(image.shape[0])]
    return np.concatenate(normalized, axis=0).copy()


# Legacy loader kept for reference: read DICOM/NIfTI volumes via SimpleITK.
# Superseded by the .npy-based get_image() below.
# def get_image(path):
#     img = sitk.ReadImage(path)
#     img = sitk.GetArrayFromImage(img)
#     img = img.transpose((1, 2, 0)).astype(float)
#     img = np.clip(img, 0, 1200)
#     img = np.expand_dims(img, axis=0)
#     return img

def get_image(path, clip_min=0, clip_max=1200):
    """Load a volume stored as a NumPy array and prepare it for the network.

    Args:
        path: path to a .npy file (despite the '.npz.npy' naming used by the
            callers, these are plain .npy arrays loaded with np.load).
        clip_min: lower intensity bound; default keeps the original behavior.
        clip_max: upper intensity bound; the hard-coded [0, 1200] window is
            generalized into backward-compatible keyword parameters.

    Returns:
        float32 array of shape (1, *volume.shape) — a leading channel axis
        is added.
    """
    img = np.load(path)
    img = img.astype(np.float32)
    img = np.clip(img, clip_min, clip_max)
    return np.expand_dims(img, axis=0)

def load_data(image_path):
    """Load the T1/T2/ADC bbox volumes for one study and normalize them.

    Each modality is loaded with `get_image`, stacked along the channel
    axis, and jointly standardized via `multi_model_Standard`.
    """
    modality_files = ('t1_bbox.npz.npy', 't2_bbox.npz.npy', 'adc_bbox.npz.npy')
    volumes = [get_image(image_path + '/' + name) for name in modality_files]
    stacked = np.concatenate(volumes, axis=0)
    return multi_model_Standard(stacked)


def one_hot(x, class_count):
    """Return the one-hot encoding of label(s) `x` over `class_count` classes."""
    identity = np.eye(class_count)
    return identity[x, :]


def test(image, weight_saved_path_list):
    """Ensemble inference: average the softmax output of several checkpoints.

    Args:
        image: (C, ...) numpy volume; a batch axis is added here.
        weight_saved_path_list: paths to per-fold state_dict checkpoints,
            loaded one after another into the same model instance.

    Returns:
        (inv_pred, men_pred): binary predictions derived from the 3-class
        argmax — class 0 -> (0, 0), class 1 -> (1, 1), class 2 -> (0, 1).
    """
    criterion = torch.nn.CrossEntropyLoss(reduction='mean')
    model = ContrastiveDisFusion(args=args, criterion=criterion, num_classes=num_classes, dim_list=False)
    image = np.expand_dims(image, axis=0)  # add the batch dimension
    image = torch.from_numpy(image).type(torch.FloatTensor).cuda()

    fold_probs = []
    # FIX: run under no_grad — the original tracked gradients during pure
    # inference, building an unused autograd graph per forward pass.
    with torch.no_grad():
        for weight_path in weight_saved_path_list:
            model.load_state_dict(torch.load(weight_path))
            model = model.cuda()
            model.eval()
            out = model(image, run_type='test')['out_prediction']
            fold_probs.append(F.softmax(out, dim=1))

    # Single concatenation instead of growing a tensor inside the loop.
    pred = torch.cat(fold_probs, dim=0)
    pred_all = torch.mean(pred, dim=0)
    pred_final = int(pred_all.argmax(dim=0))

    # FIX: the original left inv_pred/men_pred unbound (NameError) if the
    # class index fell outside {0, 1, 2}; fail explicitly instead.
    class_to_preds = {0: (0, 0), 1: (1, 1), 2: (0, 1)}
    if pred_final not in class_to_preds:
        raise ValueError('unexpected class index: {}'.format(pred_final))
    inv_pred, men_pred = class_to_preds[pred_final]
    return inv_pred, men_pred


def test_fold(image, weight_saved_path):
    """Single-checkpoint inference for one preprocessed volume.

    Args:
        image: (C, ...) numpy volume; a batch axis is added here.
        weight_saved_path: path to a state_dict checkpoint.

    Returns:
        (inv_pred, men_pred): binary predictions derived from the 3-class
        argmax — class 0 -> (0, 0), class 1 -> (1, 1), class 2 -> (0, 1).
    """
    criterion = torch.nn.CrossEntropyLoss(reduction='mean')
    model = ContrastiveDisFusion(args=args, criterion=criterion, num_classes=num_classes, dim_list=False)
    image = np.expand_dims(image, axis=0)  # add the batch dimension
    image = torch.from_numpy(image).type(torch.FloatTensor).cuda()

    model.load_state_dict(torch.load(weight_saved_path))
    model = model.cuda()
    model.eval()
    # FIX: run under no_grad — the original tracked gradients during pure
    # inference, building an unused autograd graph.
    with torch.no_grad():
        out = model(image, run_type='test')['out_prediction']
    pred = int(out.argmax(dim=-1))

    # FIX: the original left inv_pred/men_pred unbound (NameError) if the
    # class index fell outside {0, 1, 2}; fail explicitly instead.
    class_to_preds = {0: (0, 0), 1: (1, 1), 2: (0, 1)}
    if pred not in class_to_preds:
        raise ValueError('unexpected class index: {}'.format(pred))
    inv_pred, men_pred = class_to_preds[pred]
    return inv_pred, men_pred


def PredictTCL(study_id):
    """Run TCL prediction for one study and print/return the two labels.

    Loads the study's preprocessed bbox volumes, classifies them with the
    bundled single-fold checkpoint, and returns [inv_pred, men_pred].
    """
    bbox_dir = f'../Dicom_download/bboxes_{study_id}'
    volume = load_data(bbox_dir)

    # Single-checkpoint inference with the bundled fold-1 weights.
    checkpoint_path = './models/1fold_train_weight_100_32.pth'
    inv_pred, men_pred = test_fold(volume, checkpoint_path)
    print('inv_pred:{}\nmen_pred:{}'.format(inv_pred, men_pred))

    return [inv_pred, men_pred]

if __name__ == '__main__':
    # Example study ID; replace with the patient/study to score.
    PredictTCL('WANG_XU')

