from pathlib import Path
import random
import cv2
import json
import math
import numpy as np
from scipy.io import loadmat
from torch.utils.data import Dataset
from torch import from_numpy
from ais.core import *
from ais.image import cv_show, keypoint_to_mask, find_bounding_rect, generate_heatmap, draw_umich_gaussian, gaussian_radius
from ais.utils import load_json
from .utils import sort_points_v2

__all__ = ['BaseSegDataset']

class BaseSegDataset(Dataset):
    """Segmentation dataset pairing images with keypoint-derived masks.

    Each item is an image under ``<data_root>/data/<phase>/`` with a sibling
    ``<stem>.json`` annotation file whose ``'shapes'`` entry holds keypoints.
    The keypoints are sorted (``sort_points_v2``) and rasterized into a mask
    (``keypoint_to_mask``), optionally augmented together with the image, and
    returned as normalized tensors.
    """

    def __init__(self, data_root: Path,
                 phase,
                 data_items,
                 target_size,
                 augmentation_func,
                 stats,
                 kp_num=68):
        """
        :param data_root: dataset root; files live in ``data_root/data/<phase>/``
        :param phase: 'training' or 'test' (subdirectory name under ``data/``)
        :param data_items: list of image Paths; each item's stem keys the
            matching ``.json`` annotation
        :param target_size: [h, w] passed to the augmentation factory
        :param augmentation_func: factory taking (h, w) and returning an
            albumentations-style callable, or a falsy value for no augmentation
        :param stats: mean and std used by ``image_to_tensor`` normalization
        :param kp_num: number of keypoints expected by ``sort_points_v2``
        """
        self.phase = phase
        self.target_size = target_size
        self.data_items = data_items
        self.augmentation = augmentation_func(*self.target_size) if augmentation_func else None
        self.data_root = data_root
        self.stats = stats
        self.kp_num = kp_num

    def norm_tensor(self, image: NPImage, data: NPArrayList = None):
        """Scale image to [0, 1], normalize with ``self.stats``, and tensorize.

        :param image: uint8-like image array; divided by 255 before normalization
        :param data: a single mask or a list of masks; a list is tensorized
            per-element and concatenated along dim 0
        :return: (image_tensor, mask_tensor) tuple
        """
        image = image.astype(np.float32) / 255.0
        image = image_to_tensor(image, self.stats)

        if is_listy(data):
            mask = cat([mask_to_tensor(o) for o in data], dim=0)
        else:
            mask = mask_to_tensor(data)

        return image, mask

    def __getitem__(self, index):
        """Load image + keypoint annotation, build the mask, augment, tensorize."""
        item: Path = self.data_items[index]
        subject_item = item.stem
        image_file = self.data_root.joinpath('data', self.phase, item.name)
        kp_file = self.data_root.joinpath('data', self.phase, str(subject_item) + ".json")

        img = cv2.imread(str(image_file))
        # cv2.imread silently returns None on a missing or unreadable file,
        # which would otherwise surface as an opaque AttributeError below.
        if img is None:
            raise FileNotFoundError(f"Failed to read image: {image_file}")

        # Keep only the actual file read inside the with-block.
        with open(str(kp_file), 'r') as f:
            data_dict: dict = json.load(f)

        landmark = sort_points_v2(data_dict['shapes'], self.kp_num)
        mask = keypoint_to_mask(landmark, img.shape[:2])
        # Rescale to 8-bit range before augmentation — presumably
        # keypoint_to_mask emits a {0, 1} mask; confirm against ais.image.
        mask = mask * 255

        if self.augmentation:
            augmented = self.augmentation(image=img, mask=mask)
            img = augmented['image']
            mask = augmented['mask']

        return self.norm_tensor(img, mask)

    def __len__(self):
        return len(self.data_items)
