from pathlib import Path
import random
import cv2
import json
import math
import numpy as np
from scipy.io import loadmat
from torch.utils.data import Dataset
from torch import from_numpy
from ais.core import *
from ais.image import cv_show, keypoint_to_mask, find_bounding_rect, generate_heatmap, draw_umich_gaussian, gaussian_radius, resize_uniform_same_size, draw_contours_to_mask
from ais.utils import load_json
from .utils import sort_points, sort_points_v2

__all__ = ['VertebraLandmarkDataset', 'VertebraLandmarkBoundingRectDataset', 'VertebraLandmarkSegDataset']


def aug_landmark(landmarks, p=0.5, offset=5, random_id_nums=3):
    """Randomly jitter a few vertebra corner pairs for data augmentation.

    With probability ``p``, pick ``random_id_nums`` distinct vertebrae on one
    randomly chosen side of the spine and shift both corner points of each
    picked vertebra by ``offset`` pixels along x. The top-most and bottom-most
    vertebrae are additionally shifted along y.

    :param landmarks: (N, 2) array of corner points interleaved as left/right
        pairs: even rows are left-side points, odd rows are right-side points.
    :param p: probability of applying the jitter at all.
    :param offset: jitter magnitude in pixels (previously hard-coded to 5;
        the old unused ``l_offset`` constant is folded into this).
    :param random_id_nums: number of vertebrae to jitter (previously
        hard-coded to 3).
    :return: a new (N, 2) array; the input array is never modified in place.
    """
    jittered = np.copy(landmarks)
    if random.random() >= p:
        return jittered

    # Even rows = left points, odd rows = right points. These are views into
    # `jittered`, so in-place edits below update the output array directly —
    # no need to re-interleave afterwards.
    left = jittered[0::2]
    right = jittered[1::2]

    # Each vertebra contributes 2 consecutive points per side.
    vertebra_count = left.shape[0] // 2
    picked = random.sample(range(vertebra_count), random_id_nums)

    # Jitter exactly one side per call, chosen at random.
    side = right if random.random() < 0.5 else left

    for i in picked:
        # Shift both corner points of vertebra i by the same amount in x.
        dx = offset if random.random() < 0.5 else -offset
        side[i * 2][0] += dx
        side[i * 2 + 1][0] += dx
        if i == 0 or i == vertebra_count - 1:
            # Top-most / bottom-most vertebra: also jitter vertically.
            dy = offset if random.random() < 0.5 else -offset
            side[i * 2][1] += dy
            side[i * 2 + 1][1] += dy

    return jittered


class BaseLandmarkDataset(Dataset):
    """Base dataset producing a normalized image tensor and a landmark heatmap.

    Expects the layout ``data_root/{data,labels,cobb}/{phase}/...`` where the
    label is a MATLAB ``.mat`` file exposing a ``p2`` landmark array and the
    cobb annotation is a JSON file with ``filename`` and ``cobb`` keys.
    """

    def __init__(self, data_root: Path,
                 phase,
                 data_items,
                 target_size,
                 augmentation_func,
                 stats,
                 kp_num=68,
                 heat_size=(128, 64),
                 ann_type=None):
        """
        :param data_root: dataset root directory
        :param phase: 'training' or 'test'
        :param data_items: items list
        :param target_size: [h, w]
        :param augmentation_func: factory called with ``*target_size`` that
            returns a transform callable yielding a dict with 'image', 'mask'
            and 'keypoints' entries, or falsy to disable augmentation
        :param stats: mean and std used for normalization
        :param kp_num: number of keypoints
        :param heat_size: heatmap size [h, w]
        :param ann_type: when 'kp', 'angle' or 'heatmap', the image is masked
            by the landmark region before augmentation. Previously this
            attribute was read in ``__getitem__`` but never set, raising
            AttributeError; it is now an explicit, defaulted parameter.
        """
        self.phase = phase
        self.target_size = target_size
        self.data_items = data_items
        self.augmentation = None
        if augmentation_func:
            self.augmentation = augmentation_func(*self.target_size)
        self.data_root = data_root
        self.stats = stats
        self.kp_num = kp_num
        self.heat_size = heat_size
        self.ann_type = ann_type

    def norm_tensor(self, image: NPImage, data: NPArrayList = None, landmark: NPArrayList = None):
        """Normalize ``image`` to a tensor and render ``landmark`` to a heatmap.

        :param image: HWC image array
        :param data: unused; kept for interface compatibility
        :param landmark: keypoints as array or list/tuple of (x, y) pairs
        :return: (image_tensor, heatmap_tensor)
        """
        image = image.astype(np.float32)
        image = image_to_tensor(image, self.stats)

        if isinstance(landmark, (tuple, list)):
            landmark = np.array(landmark)
        # heat_size/target_size are stored [h, w]; generate_heatmap is called
        # with the reversed [w, h] order here.
        heat_map = generate_heatmap(landmark, self.heat_size[::-1], self.target_size[::-1], sigma=1)
        return image, from_numpy(heat_map).float()

    def __getitem__(self, index):
        subject_item: Path = self.data_items[index]
        subject_item = subject_item.name
        image_file = self.data_root.joinpath('data', self.phase, subject_item)
        label_file = self.data_root.joinpath('labels', self.phase, str(subject_item) + ".mat")
        angle_json = self.data_root.joinpath('cobb', self.phase, str(subject_item) + ".json")

        img = cv2.imread(str(image_file))
        landmark = loadmat(str(label_file))['p2']
        mask = keypoint_to_mask(landmark, img.shape[:2])
        angle_dict = load_json(str(angle_json))
        assert subject_item == angle_dict['filename']
        angle = np.array(angle_dict['cobb'])  # loaded for validation; not returned

        if self.ann_type in ('kp', 'angle', 'heatmap'):
            # Zero out everything outside the landmark region.
            crop_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
            img = img * crop_mask

        mask = mask * 255

        # Fallback keeps this method usable without augmentation (previously a
        # NameError: `kpts` was only bound inside the `if` branch).
        kpts = landmark
        if self.augmentation:
            aug_image = self.augmentation(image=img,
                                          mask=mask,
                                          keypoints=landmark)
            img = aug_image['image']
            kpts = aug_image['keypoints']
        return self.norm_tensor(img, data=None, landmark=kpts)

    def __len__(self):
        return len(self.data_items)


class VertebraLandmarkDataset(BaseLandmarkDataset):
    """
    Generate CenterNet-style targets: center heatmap, center offset and
    center-to-corner offsets for each vertebra (4 corner points per vertebra).
    """
    # Output stride between the input image and the heatmap.
    down_ratio = 4

    def generate_norm_gt(self, image, landmark):
        """Build regression targets from 4-corner vertebra landmarks.

        :param image: HWC BGR image (already augmented/cropped)
        :param landmark: (kp_num, 2) corner points; every 4 consecutive rows
            belong to one vertebra
        :return: (image_tensor, dict) with keys 'heatmap', 'center_offset',
            'corner_offset', 'reg_mask', 'vertebra_ind', 'landmark'
        """
        landmark = np.asarray(landmark, np.float32)
        vertebra_num = self.kp_num // 4
        # Heatmap resolution derives from down_ratio (previously hard-coded
        # to 4 here while the points below used self.down_ratio).
        image_h = image.shape[0] // self.down_ratio
        image_w = image.shape[1] // self.down_ratio
        heatmap = np.zeros((1, image_h, image_w), dtype=np.float32)
        corner_offset = np.zeros((vertebra_num, 2 * 4), dtype=np.float32)
        center_offset = np.zeros((vertebra_num, 2), dtype=np.float32)
        vertebra_ind = np.zeros((vertebra_num), dtype=np.int64)
        reg_mask = np.zeros((vertebra_num), dtype=np.uint8)

        assert landmark.shape[0] == self.kp_num

        for k in range(vertebra_num):
            # Corners of vertebra k, mapped into heatmap coordinates.
            pts = landmark[4 * k: 4 * k + 4, :] / float(self.down_ratio)
            # Box size = mean length of the two vertical / horizontal edges.
            bbox_h = np.mean([np.linalg.norm(pts[0, :] - pts[2, :]),
                              np.linalg.norm(pts[1, :] - pts[3, :])])
            bbox_w = np.mean([np.linalg.norm(pts[0, :] - pts[1, :]),
                              np.linalg.norm(pts[2, :] - pts[3, :])])

            cen_x, cen_y = np.mean(pts, axis=0)
            ct = np.asarray([cen_x, cen_y], dtype=np.float32)
            ct_int = ct.astype(np.int32)
            # Guard against centers landing exactly on the right/bottom edge,
            # which would index outside the heatmap.
            ct_int[0] = min(ct_int[0], image_w - 1)
            ct_int[1] = min(ct_int[1], image_h - 1)
            radius = gaussian_radius((math.ceil(bbox_h), math.ceil(bbox_w)))
            radius = max(0, int(radius))
            draw_umich_gaussian(heatmap[0, :, :], ct_int, radius=radius)
            # Flattened index of the center cell (row-major).
            vertebra_ind[k] = ct_int[1] * image_w + ct_int[0]
            # Sub-pixel residual lost by the int cast.
            center_offset[k] = ct - ct_int
            reg_mask[k] = 1
            for i in range(4):
                corner_offset[k, 2 * i:2 * i + 2] = ct - pts[i, :]

        image = image.astype(np.float32)
        # Simple [-0.5, 0.5) normalization, then HWC -> CHW.
        image = (image / 255.) - 0.5
        image = from_numpy(np.transpose(image, (2, 0, 1)))

        return image, {'heatmap': heatmap,
                       'center_offset': center_offset,
                       'corner_offset': corner_offset,
                       'reg_mask': reg_mask,
                       'vertebra_ind': vertebra_ind,
                       'landmark': landmark}

    def __getitem__(self, index):
        item: Path = self.data_items[index]
        subject_item = item.stem
        image_file = self.data_root.joinpath('data', self.phase, item.name)
        kp_file = self.data_root.joinpath('data', self.phase, str(subject_item) + ".json")

        img = cv2.imread(str(image_file))

        with open(str(kp_file), 'r') as f:
            data_dict: dict = json.load(f)
        landmark, _ = sort_points(data_dict['shapes'], self.kp_num)
        mask = keypoint_to_mask(landmark, img.shape[:2]) * 255

        if self.augmentation:
            aug_image = self.augmentation(image=img,
                                          mask=mask,
                                          keypoints=landmark)
            img = aug_image['image']
            landmark = aug_image['keypoints']

        return self.generate_norm_gt(img, landmark)


class VertebraLandmarkBoundingRectDataset(VertebraLandmarkDataset):
    """Variant that crops image/mask to the landmarks' bounding rectangle,
    expanded by a random margin, before generating targets."""

    # Nominal margin (pixels) added around the landmark bounding rect.
    default_bounding_rect_offset = 40

    def generate_bounding_rect_offset(self):
        """Return a random margin in [default-20, default+20) as a 1-element array."""
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int gives the same platform-default integer dtype.
        return np.random.randint(self.default_bounding_rect_offset - 20,
                                 self.default_bounding_rect_offset + 20, 1,
                                 dtype=int)

    def __getitem__(self, index):
        item: Path = self.data_items[index]
        subject_item = item.stem
        image_file = self.data_root.joinpath('data', self.phase, item.name)
        kp_file = self.data_root.joinpath('data', self.phase, str(subject_item) + ".json")

        img = cv2.imread(str(image_file))

        with open(str(kp_file), 'r') as f:
            data_dict: dict = json.load(f)
        landmark = sort_points_v2(data_dict['shapes'], self.kp_num)
        mask = keypoint_to_mask(landmark, img.shape[:2]) * 255

        # Expand the landmark bounding rect by a random margin, clamped to the
        # image bounds.
        bounding_xyxy, _ = find_bounding_rect(mask)
        offset = self.generate_bounding_rect_offset()[0]
        bounding_xyxy[0] -= offset
        bounding_xyxy[1] -= offset
        bounding_xyxy[2] += offset
        bounding_xyxy[3] += offset
        h, w = mask.shape[0], mask.shape[1]
        bounding_xyxy[0::2] = np.clip(bounding_xyxy[0::2], 0, w)
        bounding_xyxy[1::2] = np.clip(bounding_xyxy[1::2], 0, h)

        # Crop, then shift/clip landmarks into the cropped coordinate frame.
        img = img[bounding_xyxy[1]: bounding_xyxy[3],
                  bounding_xyxy[0]: bounding_xyxy[2]]
        mask = mask[bounding_xyxy[1]: bounding_xyxy[3],
                    bounding_xyxy[0]: bounding_xyxy[2]]
        landmark[:, 0] = np.clip(landmark[:, 0] - bounding_xyxy[0], 0, img.shape[1])
        landmark[:, 1] = np.clip(landmark[:, 1] - bounding_xyxy[1], 0, img.shape[0])

        if self.augmentation:
            aug_image = self.augmentation(image=img,
                                          mask=mask,
                                          keypoints=landmark)
            img = aug_image['image']
            mask = aug_image['mask']
            landmark = aug_image['keypoints']

        return self.generate_norm_gt(img, landmark)

class VertebraLandmarkSegDataset(VertebraLandmarkBoundingRectDataset):
    """Bounding-rect crop variant that additionally zeroes the image outside
    the (optionally jittered) vertebra segmentation mask."""

    def __getitem__(self, index):
        item: Path = self.data_items[index]
        subject_item = item.stem
        image_file = self.data_root.joinpath('data', self.phase, item.name)
        kp_file = self.data_root.joinpath('data', self.phase, str(subject_item) + ".json")

        img = cv2.imread(str(image_file))

        with open(str(kp_file), 'r') as f:
            data_dict: dict = json.load(f)
        landmark = sort_points_v2(data_dict['shapes'], self.kp_num)
        if self.phase == 'training':
            # Build the crop mask from jittered landmarks so the model does
            # not overfit to perfect masks; the regression targets below are
            # still computed from the un-jittered landmarks.
            aug_kp = aug_landmark(landmark, p=0.05)
            mask = keypoint_to_mask(aug_kp, img.shape[:2])
        else:
            mask = keypoint_to_mask(landmark, img.shape[:2])

        mask = mask * 255
        bounding_xyxy, ct = find_bounding_rect(mask)

        # Expand by a random margin and clamp to the image. Take the scalar
        # element explicitly (previously the 1-element array itself was used,
        # unlike the parent class).
        offset = self.generate_bounding_rect_offset()[0]
        bounding_xyxy[0] -= offset
        bounding_xyxy[1] -= offset
        bounding_xyxy[2] += offset
        bounding_xyxy[3] += offset
        h, w = mask.shape[0], mask.shape[1]
        bounding_xyxy[0::2] = np.clip(bounding_xyxy[0::2], 0, w)
        bounding_xyxy[1::2] = np.clip(bounding_xyxy[1::2], 0, h)

        # Crop, then shift/clip landmarks into the cropped coordinate frame.
        img = img[bounding_xyxy[1]: bounding_xyxy[3],
                  bounding_xyxy[0]: bounding_xyxy[2]]
        mask = mask[bounding_xyxy[1]: bounding_xyxy[3],
                    bounding_xyxy[0]: bounding_xyxy[2]]
        landmark[:, 0] = np.clip(landmark[:, 0] - bounding_xyxy[0], 0, img.shape[1])
        landmark[:, 1] = np.clip(landmark[:, 1] - bounding_xyxy[1], 0, img.shape[0])

        # Zero out everything outside the vertebra mask (mask is 0/255 here).
        crop_mask = cv2.cvtColor((mask / 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)
        img = img * crop_mask

        if self.augmentation:
            aug_image = self.augmentation(image=img,
                                          mask=mask,
                                          keypoints=landmark)
            img = aug_image['image']
            mask = aug_image['mask']
            landmark = aug_image['keypoints']

        return self.generate_norm_gt(img, landmark)
