import os
import sys

from glob import glob
from models.LeisionNet_NAS.utils.helpers import *
from models.LeisionNet_NAS.net.nodule_net import NoduleNet as Net
from models.LeisionNet_NAS.config import final_config
from models.LeisionNet_NAS.test import predict
import json
import logging
import SimpleITK as sitk
import models.settings as S
import argparse
import torch
# from keras.models import model_from_json
from models.LeisionNet_NAS.utils.util import load_dicom_image, normalize, pad2factor

sys.path.append("./")
sys.path.append("../")


class Deploy:
    """End-to-end nodule inference pipeline for one DICOM case.

    Loads the case volume, runs the NAS detection network to get crop boxes
    and a segmentation mask, classifies each detected crop with a secondary
    network, and returns a JSON summary string.
    """

    def __init__(self):
        # Config and detector-checkpoint paths are resolved relative to this file.
        self.current_dir = os.path.dirname(__file__)
        self.datagen_dict = read_json(self.current_dir + "/configs/datagen.json")['datagen']
        self.resampling_dict = read_json(self.current_dir + "/configs/preprocess.json")['preprocessing'][
            "resampling"]
        self.datagen_dict_specs = self.datagen_dict['specs']
        self.datagen_dict_prep = self.datagen_dict['preprocessing']

    def predict_nodule(self, info):
        """Run detection + classification for one case.

        Args:
            info: dict with at least a ``"case"`` key naming the DICOM folder
                under ``S.dicom_folder``.

        Returns:
            A JSON string with the case id, detected crop boxes, segment
            summary, and per-box predicted class ids.
        """
        self.info = info
        self.case = info["case"]
        print("*" * 100)
        print(self.case)

        logging.info('Predicting...')
        model = self._load_detector()

        processed_image = self.extract_patches()
        logging.info('dicom read shape %s', str(processed_image.shape))

        # Remember the original extent so the padded prediction can be
        # trimmed back to the input volume afterwards.
        D, H, W = processed_image.shape
        input = self._prepare_input(processed_image)
        logging.info('network input shape %s', str(input.shape))

        crop_boxes, segments, pred_mask = predict(model, input)
        # Trim the padding introduced by pad2factor back to (D, H, W).
        pred_mask = pred_mask[np.newaxis, :][:, :D, :H, :W]
        logging.info('pred_mask shape %s', str(pred_mask.shape))

        pred_cls = self._classify_crops(processed_image, crop_boxes)

        response_dict = {"case": self.info["case"],
                         "crop_boxes": crop_boxes.tolist(),
                         "segments": str(segments),
                         # NOTE: misspelled key kept as-is — it is part of the
                         # public JSON contract consumed by existing clients.
                         "clssification": pred_cls}
        torch.cuda.empty_cache()
        return json.dumps(response_dict)

    def _load_detector(self):
        """Build the detection network and load its checkpoint onto the GPU."""
        net = Net(final_config).cuda()
        weight_path = self.current_dir + "/model/200.ckpt"
        checkpoint = torch.load(weight_path)
        net.load_state_dict(checkpoint['state_dict'])
        # Inference mode: freeze batch-norm statistics / disable dropout.
        net.eval()
        return net

    def _prepare_input(self, image):
        """Pad, normalize and wrap a (D, H, W) volume as a 5-D GPU float tensor."""
        volume = pad2factor(image.copy())
        volume = normalize(volume[np.newaxis, ...].astype(np.float32))
        return torch.from_numpy(volume).float().unsqueeze(0).cuda()

    def _classify_crops(self, image, crop_boxes):
        """Classify each detected crop with the secondary network.

        Args:
            image: the original (D, H, W) volume the boxes were detected in.
            crop_boxes: iterable of boxes whose first three entries are the
                crop start coordinates.

        Returns:
            A JSON-serializable list of int class ids, one per box.
        """
        crop_size = 30
        # NOTE(review): absolute path kept from the original deployment layout
        # (presumably a container mount) — confirm before changing.
        checkpoint2 = torch.load('/model/ckpt400.t7')
        net2 = checkpoint2['net']
        net2.cuda()
        net2.eval()

        pred_cls = []
        for crop_box in crop_boxes:
            bgx = int(crop_box[0])
            bgy = int(crop_box[1])
            bgz = int(crop_box[2])
            # BUG FIX: the original sliced `crop_box` (a coordinate triple)
            # instead of the image volume when extracting the patch.
            # Axis order assumed to match `image` (D, H, W) — TODO confirm
            # against the detector's box convention.
            patch = np.array(image[bgx:bgx + crop_size,
                                   bgy:bgy + crop_size,
                                   bgz:bgz + crop_size])
            # The network expects a GPU tensor, not a raw NumPy array.
            input2 = torch.from_numpy(
                patch[np.newaxis, np.newaxis, ...].astype(np.float32)).cuda()
            output = net2(input2)
            logits = output[0] if isinstance(output, tuple) else output
            _, prediction = torch.max(logits.data, 1)
            # .item() yields a plain int — the original appended a torch
            # tensor, which json.dumps cannot encode.
            pred_cls.append(int(prediction.item()))
        return pred_cls

    def resample_image(self, image):
        """Resample `image` to the T2 target spacing from the preprocessing config."""
        voxel_resampling_dict = {"t2_tse_tra": self.resampling_dict["spacing"]["t2"],
                                 "ADC": self.resampling_dict["spacing"]["dwi"],
                                 "BVAL": self.resampling_dict["spacing"]["dwi"],
                                 "Ktrans": self.resampling_dict["spacing"]["ktrans"]}
        return resample_new_spacing(image, target_spacing=voxel_resampling_dict["t2_tse_tra"])

    def read_image(self):
        """Load the raw DICOM series for the current case.

        Returns the raw voxel array as loaded by ``load_dicom_image``;
        resampling and intensity-window preprocessing are currently disabled.
        """
        file_path = os.path.join(S.dicom_folder, self.case)
        raw_img, origin, spacing = load_dicom_image(file_path)
        return raw_img

    def extract_patches(self):
        """Return the image volume for the case.

        Cropping around a point of interest is currently disabled, so this
        returns the full volume unchanged.
        """
        image = self.read_image()
        logging.info('cropped shape %s', str(np.shape(image)))
        return image
