import os, sys
print(os.getcwd())
sys.path.append(os.getcwd())
from scipy import ndimage
from torch.optim import SGD, Adam
from torch.nn.functional import binary_cross_entropy_with_logits
from torch.optim.lr_scheduler import MultiStepLR, StepLR
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from skorch.core import *
#from skorch.vision import *
from skorch import NeuralNet, NeuralNetClassifier
from skorch.helper import predefined_split
from skorch.dataset import CVSplit
from skorch.callbacks import EpochScoring, LRScheduler, CyclicLR, Checkpoint
from skorch.loss import FocalLoss
from resnet import resnet18
import SimpleITK as sitk

from vertebra_segment import load_pickle

# Fix the torch RNG seed so weight init and shuffling are reproducible.
torch.manual_seed(0)

# Per-channel normalization statistics for CT vertebra patches: [[mean], [std]].
# NOTE(review): defined but unused in this chunk — stats are loaded from
# intensityproperties.pkl in get_dataloader instead; confirm before removing.
ct_vertebra_stats = [[0.030088576], [0.13161582]] #mean std


class PatchStatus(Enum):
    """
    Patch and mask intersection.

    Describes how a sampled patch overlaps the vertebra mask.

    NOTE(review): the (misspelled) member names are kept as-is because they
    are used as dict keys in get_dataloader and are stored inside the
    pickled per-patch metadata ('complement_class'); renaming them would
    break those lookups and any previously written pickles.
    """
    complemet = 0        # patch fully contains the vertebra ("complete")
    incomplement = 1     # patch only partially intersects the vertebra
    containsnothing = 2  # patch contains no vertebra at all


def classify_cnn(n_classes: int=2, softmax: bool=False, feature_scale:int=1, imsize:Sizes=(256,256, 256)):
    """
    Build the classification network: a 3D ResNet-18 backbone, optionally
    followed by a Softmax over the class dimension.

    :param n_classes: number of output classes
    :param softmax: if True, append nn.Softmax(dim=-1) after the backbone
    :param feature_scale: scale value (accepted but not used by this builder)
    :param imsize: input volume size as (W, H, D)
    :return: nn.Sequential wrapping the backbone (and the optional softmax)
    """
    backbone = resnet18(sample_input_W=imsize[0],
                        sample_input_H=imsize[1],
                        sample_input_D=imsize[2],
                        shortcut_type='B',
                        no_cuda=False,
                        num_classes=n_classes,
                        task_type='classify')

    modules = [backbone]
    if softmax:
        modules.append(nn.Softmax(dim=-1))

    return nn.Sequential(*modules)


class ClassifyDataset(Dataset):
    def __init__(self, image_path:PathOrStr, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
        """
        Dataset of vertebra patches for the classification task.

        :param image_path: list of per-patch ``.pkl`` metadata file paths
        :param augmentation_func: accepted but currently ignored — the
            augmentation hook below is intentionally disabled
        :param stats: (mean, std) pair used to normalize the volumes;
            when falsy, self.stats is left unset (transform_tensor would
            then raise AttributeError)
        :param target_size: target cubic size; volumes are zoomed so their
            first axis matches target_size[0]
        """
        super().__init__()
        # parents[1]: the dataset root two directory levels above a patch file
        self.data_path = image_path[0].parents[1]
        self.images_items = sorted(image_path)
        # NOTE(review): augmentation is deliberately left off here; the
        # augmentation_func argument is accepted but never applied.
        self.augmentation = None
        self.target_size = target_size
        if stats:
            self.stats = stats

    def read_nii(self, nii_file):
        """Load a NIfTI file and return its voxel data as a numpy array."""
        return sitk.GetArrayFromImage(sitk.ReadImage(str(nii_file)))

    def __len__(self):
        return len(self.images_items)

    def transform_tensor(self, image:NPImage):
        """Add a channel axis, scale uint8 data to [0, 1], and normalize."""
        volume = image[np.newaxis]
        divisor = 255. if volume.dtype == np.uint8 else 1
        tensor = torch.from_numpy((volume / divisor).astype(np.float32))
        mean = torch.tensor(self.stats[0])
        std = torch.tensor(self.stats[1])
        return (tensor - mean) / std

    def __getitem__(self, idx):
        meta_file: Path = self.images_items[idx]
        meta = load_pickle(str(meta_file))

        nii_path = meta_file.with_name(meta['ctvolume_name']).with_suffix(".nii")

        # 1. read the original image volume and its class label
        volume = self.read_nii(str(nii_path))
        label = int(meta['complement_class'].value)

        # rescale isotropically so the first axis matches the target size
        zoom_factor = float(self.target_size[0]) / volume.shape[0]
        if zoom_factor != 1.0:
            volume = ndimage.zoom(volume, zoom_factor, mode='nearest')

        # 2. augment (no-op in practice: self.augmentation is always None)
        if self.augmentation:
            augmented = self.augmentation(image=volume)
            volume = augmented['image']

        return self.transform_tensor(volume), label


def create_ct_vertebra_classify_datasets(data_path:PathList, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
    """
    Gather all patch metadata files under <data_path>/patch and split them
    75/25 into train/validation ClassifyDatasets.

    :param data_path: dataset root containing a ``patch`` subdirectory
    :param augmentation_func: forwarded to ClassifyDataset (currently unused there)
    :param stats: (mean, std) normalization pair, forwarded to the datasets
    :param target_size: target volume size, forwarded to the datasets
    :return: (train_dataset, validation_dataset)
    """
    patch_dir = Path(os.path.join(data_path, "patch"))
    pkl_files = sorted(patch_dir.rglob('*.pkl'))

    # NOTE(review): shuffle=True without random_state yields a different
    # split on every call — confirm whether that is intended.
    train_items, valid_items = train_test_split(pkl_files, test_size=0.25, shuffle=True)

    train_ds = ClassifyDataset(train_items, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    valida_ds = ClassifyDataset(valid_items, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    return train_ds, valida_ds


def get_dataloader(target_size = (512, 512, 512)):
    """
    Load normalization stats from disk, print the per-class patch counts,
    and return (train_dataset, validation_dataset).

    NOTE(review): despite the name, this returns Dataset objects, not
    DataLoaders.

    :param target_size: target volume size forwarded to the datasets
    :return: (train_dataset, validation_dataset)
    """
    data_root = '/home/cao/disk1/cx/VerSe_2019/vertebra_segment_data'

    # normalization statistics precomputed over the dataset
    with open(os.path.join(data_root, "intensityproperties.pkl"), 'rb') as f:
        intensityproperties = pickle.load(f)

    mean_intensity = intensityproperties[0]['mean']
    std_intensity = intensityproperties[0]['sd']

    # count how many patches fall into each PatchStatus class
    patch_dir = Path(os.path.join(data_root, "patch"))
    pkl_files = sorted(patch_dir.rglob('*.pkl'))

    status_files = {status: [] for status in (PatchStatus.complemet,
                                              PatchStatus.incomplement,
                                              PatchStatus.containsnothing)}
    for pkl_file in pkl_files:
        info = load_pickle(str(pkl_file))
        status_files[info['complement_class']].append(pkl_file)

    for status, files in status_files.items():
        print(status, ": ", len(files))

    return create_ct_vertebra_classify_datasets(data_root, None, (mean_intensity, std_intensity),
                                        target_size=target_size)


def init_model_single_label(valida_ds=None, target_size = (256, 256, 256), n_classes=4, pretrianed=False, dirname='classify_binary'):
    """
    Build a skorch NeuralNetClassifier around the 3D ResNet classifier.

    :param valida_ds: validation dataset consumed via predefined_split
    :param target_size: input volume size passed to the backbone
    :param n_classes: number of output classes
    :param pretrianed: (sic) when True, restore params/optimizer/history
        from ``dirname``; the misspelled name is kept because callers pass
        it as a keyword argument
    :param dirname: checkpoint directory
    :return: the (optionally restored) NeuralNetClassifier

    NOTE(review): the module is built with softmax=True; confirm that this
    skorch fork's default criterion expects probabilities rather than
    logits/log-probabilities.
    """
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    backbone = classify_cnn(n_classes=n_classes, softmax=True, imsize=target_size)

    # decay the learning rate 10x every 30 epochs
    lr_policy = LRScheduler(policy=StepLR, step_size=30, gamma=0.1)
    checkpoint_cb = Checkpoint(f_params='best_params_v2.pt', dirname=dirname)

    net = NeuralNetClassifier(
        backbone,
        batch_size=12,
        max_epochs=100,
        optimizer=SGD,
        lr=0.01,
        optimizer__momentum=0.9,
        iterator_train__shuffle=True,
        iterator_train__num_workers=8,
        iterator_valid__shuffle=False,
        iterator_valid__num_workers=8,
        train_split=predefined_split(valida_ds),
        callbacks=[lr_policy, checkpoint_cb],
        device=run_device,
        parallel=True
    )

    if pretrianed:
        net.initialize()
        net.load_params(f_params=dirname+'/best_params_v2.pt',
                        f_optimizer=dirname+'/optimizer.pt',
                        f_history=dirname+'/history.json')
    return net


def train():
    """Train the vertebra-patch classifier on 128^3 volumes."""
    cube = (128, 128, 128)
    train_ds, valida_ds = get_dataloader(cube)
    model = init_model_single_label(valida_ds, cube, n_classes=3, pretrianed=False, dirname='classify_v2_check')
    model.fit(train_ds)


def predict():
    """
    Evaluate the trained classifier on the validation split and print its
    accuracy.
    """
    target_size = (128, 128, 128)
    # BUG FIX: target_size was not forwarded, so get_dataloader fell back
    # to its (512, 512, 512) default while the network was initialized for
    # 128^3 inputs — an input-size mismatch between data and model.
    train_ds, valida_ds = get_dataloader(target_size)
    net = init_model_single_label(None, target_size, n_classes=3, pretrianed=False, dirname='classify_check')
    net.initialize()

    net.load_params(f_params='classify_v2_check/best_params_v2.pt')
    predicted_probs = net.predict(valida_ds)
    # NOTE(review): thresholding at 0.5 assumes net.predict returns
    # per-class probabilities; confirm for this (custom) skorch version,
    # where stock skorch would return class indices.
    predicted_labels = (predicted_probs > 0.5).astype('uint8')

    true_labels = np.asarray([valida_ds[i][1] for i in range(len(valida_ds))])

    # accuracy_score(y_true, y_pred) — argument order fixed to match
    # sklearn's API (accuracy itself is symmetric, so the value is the same).
    print(accuracy_score(true_labels, predicted_labels))


def test_predict_image():
    """Run the trained classifier on a single .nii patch and print its output."""

    def _normalize(image, stats):
        # channel axis + uint8 scaling + (x - mean) / std, mirroring
        # ClassifyDataset.transform_tensor
        arr = image[np.newaxis]
        divisor = 255. if arr.dtype == np.uint8 else 1
        arr = torch.from_numpy((arr / divisor).astype(np.float32))
        return (arr - torch.tensor(stats[0])) / torch.tensor(stats[1])

    target_size = (128, 128, 128)
    net = init_model_single_label(None, target_size, n_classes=3, pretrianed=False, dirname='classify_v2_check')
    net.initialize()
    net.load_params(f_params='classify_v2_check/best_params_v2.pt')

    data_path = '/home/blake/data/medical/datasets/vertebral/VerSe_2019'

    # dataset-wide normalization statistics
    with open(os.path.join(data_path, "nnUNet_raw_cropped", "VerSe19", "intensityproperties.pkl"), 'rb') as f:
        intensityproperties = pickle.load(f)

    stats = (intensityproperties[0]['mean'], intensityproperties[0]['sd'])

    test_img_file = "/home/blake/data/medical/datasets/vertebral/VerSe_2019/preprocessed_data/VerSe19/patch/train_verse004/ctvolume_10.nii"
    input_img = sitk.GetArrayFromImage(sitk.ReadImage(test_img_file))

    # add the batch dimension expected by the network
    img_t = _normalize(input_img, stats).unsqueeze_(0)

    start_time = time.time()
    predicted_probs = net.predict(img_t)
    print('time: ', time.time() - start_time)

    print(predicted_probs)


if __name__ == '__main__':
    # Entry point: trains by default; the commented-out calls switch the
    # script to evaluation / single-image smoke-test modes instead.
    print('vertebra classify work...')
    train()
    # predict()
    # test_predict_image()
    # train_floder_labels()
