from pathlib import Path
from tqdm import tqdm
import random
import os
import cv2
import json
import math
import numpy as np
from scipy.io import loadmat
from skimage import measure
from torch.utils.data import Dataset
from torch import from_numpy
from ais.core import *
from ais.image import cv_show, keypoint_to_mask, find_bounding_rect, generate_heatmap, draw_umich_gaussian, gaussian_radius
from ais.utils import load_json
from ais.data import sort_points_v2
from pycocotools.coco import COCO
from ais.image.pydcm import dicom_convert_jpg

try:
    from shapely.geometry import Polygon, MultiPolygon
except ImportError:
    raise ImportError('Please run "pip install Shapely" to '
                      'install Shapely first.')

class COCO_Data():
    """Thin wrapper around pycocotools' COCO that caches per-image info.

    Parses the annotation file once at construction and exposes indexed
    access to the cached image-info dicts plus their raw annotations.
    """

    def __init__(self, ann_file):
        self.img_infos = self.load_annotations(ann_file)

    def load_annotations(self, ann_file):
        """Load *ann_file* with pycocotools and return a list of image-info dicts.

        Side effects: populates ``self.coco``, ``self.cat_ids``,
        ``self.cat2label`` (category name -> id), ``self.label2cat``
        (category id -> name) and ``self.img_ids``.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        # Only the category dicts are needed; the dict keys are unused,
        # so iterate .values() instead of .items().
        self.cat2label = {cat['name']: cat['id'] for cat in self.coco.cats.values()}
        self.label2cat = {cat['id']: cat['name'] for cat in self.coco.cats.values()}
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for img_id in self.img_ids:
            info = self.coco.loadImgs([img_id])[0]
            # Duplicate under 'filename' for consumers that expect that key.
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos

    def get_ann_info(self, idx):
        """Return the raw COCO annotation dicts for the image at index *idx*."""
        img_id = self.img_infos[idx]['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        return self.coco.loadAnns(ann_ids)


def create_sub_mask_annotation(sub_mask):
    """Extract a COCO-style polygon segmentation from a binary sub-mask.

    Finds the contours of *sub_mask* (there may be several if the object is
    partially occluded, e.g. an elephant behind a tree) and returns the
    simplified exterior polygon of the FIRST contour as a flat
    ``[x1, y1, x2, y2, ...]`` coordinate list.

    NOTE(review): only the first contour is converted — multi-part objects
    lose their remaining parts.  This mirrors the original behaviour (the
    early ``return`` sat inside the loop; the accumulator lists were dead).

    :param sub_mask: 2-D binary array; a one-pixel border is assumed to have
        been added around the mask, hence the ``- 1`` offset below.
    :return: flat list of polygon coordinates, or None when the mask
        contains no contour at all.
    """
    contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')

    for contour in contours:
        # skimage yields (row, col); COCO wants (x, y).  Also subtract the
        # one-pixel padding.
        for i, (row, col) in enumerate(contour):
            contour[i] = (col - 1, row - 1)

        # Simplify to keep the JSON small.  preserve_topology=False may
        # produce a degenerate polygon for tiny masks — TODO confirm callers
        # tolerate an empty coordinate list in that case.
        poly = Polygon(contour).simplify(1.0, preserve_topology=False)
        return np.array(poly.exterior.coords).ravel().tolist()

    # No contour found: keep the original implicit-None contract explicit.
    return None


def convert_Cobb_instance_only(root=Path("/home/blake/data/medical/datasets/vertebral/boostnet_labeldata"),
                               data_types=('training',)):
    """Create COCO-format instance-segmentation json files for the Cobb dataset.

    For every ``*.jpg`` under ``root/<data_type>`` the matching ``.json``
    landmark file is loaded, its 68 landmarks are sorted, and one 'vertebra'
    instance (bbox + polygon segmentation) is written per image into
    ``root/annotations/ais_cobb_instances_<data_type>.json``.

    :param root: dataset root containing one sub-directory per split.
    :param data_types: iterable of split names ('training', 'test', ...);
        must contain one or two entries.  (Default changed from a mutable
        list literal to a tuple; callers passing lists are unaffected.)
    :raises ValueError: if *data_types* is empty or has more than two entries.
    """
    if len(data_types) == 0 or len(data_types) > 2:
        # The original called os.error(...) here, which merely *instantiates*
        # an OSError without raising or logging it — fail loudly instead.
        raise ValueError("data_types must contain 1 or 2 entries, got %r" % (data_types,))

    anno_out_path = root.joinpath("annotations")
    anno_out_path.mkdir(exist_ok=True)

    json_name = 'ais_cobb_instances_%s.json'

    for data_type in data_types:
        print('Starting %s' % data_type)  # bug fix: previously printed data_types

        info = {
            'description': "Cobb Dataset 2020",
            'url': "cobb_ais ",
            'version': "1.0",
            'year': 2020,
            'contributor': "GD",
            'date_created': "2020/08/19",
        }
        images = []
        annotations = []

        img_id = 0
        ann_id = 0

        image_lists = sorted(root.joinpath(data_type).glob("*.jpg"))

        for img_path in tqdm(image_lists):
            img = cv2.imread(str(img_path))
            # Landmarks live in a sibling .json with the same stem.
            with open(str(img_path.with_suffix('.json')), 'r') as f:
                data_dict: dict = json.load(f)
            landmark = sort_points_v2(data_dict['shapes'], 68)
            mask = keypoint_to_mask(landmark, img.shape[:2])
            height, width = img.shape[0:2]

            image = {
                'id': img_id,
                'width': width,
                'height': height,
                'file_name': str(img_path.name),
            }
            img_id += 1
            images.append(image)

            # Tight bbox of the rasterised mask, converted xyxy -> xywh.
            bbox_xyxy, _ = find_bounding_rect(mask * 255)
            bbox_xywh = [int(bbox_xyxy[0]), int(bbox_xyxy[1]),
                         int(bbox_xyxy[2] - bbox_xyxy[0] + 1),
                         int(bbox_xyxy[3] - bbox_xyxy[1] + 1)]

            # Landmarks alternate left/right down the spine; walk the right
            # side top-to-bottom then the left side bottom-to-top so the
            # polygon is traced in a single closed loop.
            mask_point_left = []
            mask_point_right = []
            for i in range(int(landmark.shape[0] / 2)):
                l_p = landmark[i * 2]
                r_p = landmark[i * 2 + 1]
                mask_point_left.append([int(l_p[0]), int(l_p[1])])
                mask_point_right.append([int(r_p[0]), int(r_p[1])])
            mask_point = []
            mask_point.extend(mask_point_right)
            mask_point.extend(mask_point_left[::-1])
            segment_point = [np.array(mask_point).astype(float).flatten().tolist()]

            ann = {
                'id': ann_id,
                'image_id': image['id'],
                'category_id': 0,
                'iscrowd': 0,
                'area': bbox_xywh[2] * bbox_xywh[3],
                'bbox': bbox_xywh,
                'segmentation': segment_point,
            }
            ann_id += 1
            annotations.append(ann)

        categories = [{"id": 0, "name": 'vertebra'}]
        ann_dict = {
            'images': images,
            'categories': categories,
            'annotations': annotations,
            'info': info,
        }
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(anno_out_path, json_name % data_type), 'w', encoding='utf-8') as outfile:
            json.dump(ann_dict, outfile)



def show_Cobb_data_ann(root=Path("/home/blake/data/medical/datasets/vertebral/boostnet_labeldata"),
                               data_types='training'):
    """Interactively display ground-truth masks and boxes for one split.

    Loads ``root/annotations/ais_cobb_instances_<data_types>.json``, overlays
    each instance mask (blue, 50% alpha) and bounding box (red) on the image,
    and waits for a keypress between images.

    :param data_types: a single split name (string, despite the plural name).
    """
    anno_out_path = root.joinpath("annotations")
    json_name = 'ais_cobb_instances_%s.json' % data_types
    ann_file = os.path.join(anno_out_path, json_name)

    gt_bbox_color = (0, 0, 255)  # red in BGR

    coco = COCO_Data(ann_file)
    print("data len: ", len(coco.img_infos))

    for idx in range(len(coco.img_infos)):
        img_info = coco.img_infos[idx]
        ann_info = coco.get_ann_info(idx)

        file_name = root.joinpath(data_types, img_info['file_name'])

        if file_name.suffix == ".jpg":
            show_gt_img = cv2.imread(str(file_name), cv2.IMREAD_COLOR)
        elif file_name.suffix == ".dcm":
            show_gt_img = read_dcm_to_8bit(str(file_name))
        else:
            # Previously show_gt_img stayed unbound here and crashed below.
            print("unsupported image type, skipping:", file_name)
            continue

        for ann in ann_info:
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']

            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # drop-in replacement.
            mask = coco.coco.annToMask(ann).astype(bool)

            color_mask = np.array([255, 0, 0])  # blue overlay in BGR
            show_gt_img[mask] = show_gt_img[mask] * 0.5 + color_mask * 0.5

            left_top = (int(x1), int(y1))
            right_bottom = (int(x1 + w), int(y1 + h))
            cv2.rectangle(
                show_gt_img, left_top, right_bottom, gt_bbox_color, thickness=4)

        cv_show(file_name.name, show_gt_img, resize=0.6)
        cv2.waitKey(0)
        cv2.destroyWindow(file_name.name)


def create_mask():
    """Rasterise each image's landmark polygon to a PNG mask under root/seg.

    For every ``*.jpg`` in ``root/image`` the sibling ``.json`` landmark
    file is loaded, the 68 landmarks are sorted, and the resulting binary
    mask (values 0/1) is written as ``root/seg/<name>.png``.
    """
    # Function-local imports removed: json, Path, cv2, cv_show and
    # keypoint_to_mask are already imported at module level.
    root = Path("/home/blake/data/medical/datasets/vertebral/AIS/cobb/data")
    save_dst = root.joinpath('seg')
    save_dst.mkdir(exist_ok=True)

    for image_file in tqdm(sorted(root.joinpath('image').glob('*.jpg'))):
        img = cv2.imread(str(image_file))
        with open(str(image_file.with_suffix('.json')), 'r') as f:
            data_dict: dict = json.load(f)
        landmark = sort_points_v2(data_dict['shapes'], 68)
        mask = keypoint_to_mask(landmark, img.shape[:2])

        save_file = save_dst.joinpath(image_file.name).with_suffix('.png')
        # NOTE: values are 0/1, so the PNG looks black unless scaled when
        # viewed; consumers are expected to treat it as a label mask.
        cv2.imwrite(str(save_file), mask.astype(np.uint8))


def convert_dcm_dirs(input_dir=Path('/home/blake/data/medical/datasets/breast'), output_dir=Path('')):
    """Convert every ``*.dcm`` in *input_dir* to a ``.jpg`` in *output_dir*.

    :param input_dir: directory scanned (non-recursively) for DICOM files.
    :param output_dir: destination directory; presumably must already
        exist — TODO confirm dicom_convert_jpg does not create it.
    """
    # enumerate() dropped: the index was never used.
    for dcm_file in sorted(input_dir.glob('*.dcm')):
        jpg_file = output_dir.joinpath(dcm_file.stem + '.jpg')
        dicom_convert_jpg(str(dcm_file), str(jpg_file))


if __name__ == '__main__':
    print(__file__)
    # One-off driver: uncomment the conversion/visualisation step you need.
    # All paths are machine-specific absolute paths.
    #convert_Cobb_instance_only(root=Path("/home/blake/data/medical/datasets/vertebral/AIS/cobb/data"), data_types=['train', 'val'])
    #show_Cobb_data_ann(root=Path("/home/blake/data/medical/datasets/vertebral/AIS/cobb/data"), data_types='train')
    convert_dcm_dirs(Path("/home/blake/data/dataset/datasets/DR/dicom/background"), Path("/home/blake/data/medical/datasets/vertebral/AIS/classification/bk"))
