import json
import math
import os
from multiprocessing import Pool, cpu_count
from typing import Dict, Tuple

import numpy as np
import torch
import torchvision.transforms.functional as tf
from PIL import Image
from tqdm import tqdm

# from .dicom_utils import read_one_dcm

# Sentinel value for padded (absent) coordinate entries; gen_mask compares
# coordinate components against this to build its validity mask.
PADDING_VALUE: int = 0


def resize(size: Tuple[int, int], image: Image.Image, spacing: torch.Tensor, *coords: torch.Tensor):
    """
    Resize the image to `size`, rescaling pixel spacing and annotation
    coordinates so they stay consistent with the new resolution.

    :param size: (height, width); height maps to the vertical axis, width to the horizontal axis
    :param image: input image
    :param spacing: physical distance between adjacent pixels, ordered (x, y)
    :param coords: annotation coordinates on the image as [[x, y]]; x grows left
        to right, y grows top to bottom
    :return: resized image, adjusted spacing, list of rescaled coordinates
    """
    # PIL's Image.size is (width, height), whereas `size` is (height, width).
    orig_width, orig_height = image.size
    scale = torch.tensor([size[1] / orig_width, size[0] / orig_height])

    new_spacing = spacing / scale
    scaled_coords = [coord * scale for coord in coords]
    resized_image = tf.resize(image, size)
    return resized_image, new_spacing, scaled_coords


def rotate_point(points: torch.Tensor, angel, center: torch.Tensor) -> torch.Tensor:
    """
    Rotate `points` clockwise by `angel` degrees around `center`.

    :param points: tensor of size (*, 2), points as (x, y)
    :param angel: rotation angle in degrees
    :param center: tensor of size (2,)
    :return: rotated points, same shape as `points` (the identical object when
        angel == 0)
    """
    if angel == 0:
        return points
    angel = angel * math.pi / 180
    # Broadcast `center` against `points` by prepending singleton dimensions.
    # The previous `center, points = center[0], points[0]` dropped the leading
    # axis of `points` (a (H, W, 2) grid came back as (W, 2)) and reduced
    # `center` to a scalar, breaking the documented (*, 2)/(2,) contract.
    while len(center.shape) < len(points.shape):
        center = center.unsqueeze(0)
    cos = math.cos(angel)
    sin = math.sin(angel)
    # Row-vector convention: rotated = (p - c) @ [[cos, -sin], [sin, cos]] + c
    rotate_mat = torch.tensor([[cos, -sin], [sin, cos]], dtype=torch.float32, device=points.device)
    output = points - center
    output = torch.matmul(output, rotate_mat)
    return output + center


def rotate_batch(points: torch.Tensor, angels: torch.Tensor, centers: torch.Tensor) -> torch.Tensor:
    """
    Rotate a batch of points clockwise, each batch item by its own angle around
    its own center.

    :param points: (num_batch, num_points, 2)
    :param angels: (num_batch,) rotation angles in degrees
    :param centers: (num_batch, 2)
    :return: rotated points, (num_batch, num_points, 2)
    """
    centers = centers.unsqueeze(1)  # (num_batch, 1, 2) so it broadcasts over points
    output = points - centers

    angels = angels * math.pi / 180
    cos = angels.cos()
    sin = angels.sin()
    # Same row-vector convention as rotate_point: out = (p - c) @ [[cos, -sin], [sin, cos]].
    # The previous element-wise multiply followed by sum(dim=-1) reduced over the
    # wrong axis, yielding out[i] * (mat[i,0] + mat[i,1]) — not a rotation
    # (a 90-degree angle left the points unchanged).
    rotate_mats = torch.stack([cos, -sin, sin, cos], dim=1).reshape(-1, 2, 2)
    output = torch.matmul(output, rotate_mats)
    return output + centers


def rotate(image: Image.Image, points: torch.Tensor, angel: int) -> (Image.Image, torch.Tensor):
    """Rotate the image by `angel` degrees and move the annotation points with it,
    using the image center as the pivot."""
    pivot = torch.tensor(image.size, dtype=torch.float32) / 2
    rotated_image = tf.rotate(image, angel)
    rotated_points = rotate_point(points, angel, pivot)
    return rotated_image, rotated_points


def gen_distmap(image: torch.Tensor, spacing: torch.Tensor, *gt_coords: torch.Tensor, angel=0):
    """
    Rotate every pixel coordinate clockwise by `angel` degrees, then compute the
    physical distance from each pixel to every annotated point.

    :param image: tensor indexed below as image.shape[1] (height) and
        image.shape[2] (width) — presumably (channel, height, width); TODO confirm
    :param gt_coords: each of size (*, 2), annotation points as (x, y)
    :param spacing: physical distance between adjacent pixels, ordered (x, y)
    :param angel: clockwise rotation angle in degrees
    :return: one distance map of shape (num_points, height, width) per entry in
        gt_coords; the bare map (not a list) when exactly one entry is given
    """
    # `pixel < inf` is true for every finite pixel, so torch.where enumerates
    # the (row, col) indices of all pixels.
    coord = torch.where(image.squeeze() < np.inf)
    # Note: x and y must be swapped — torch.where yields (row, col) = (y, x),
    # while coordinates here are (x, y); center is (width, height) / 2.
    center = torch.tensor([image.shape[2], image.shape[1]], dtype=torch.float32) / 2
    coord = torch.stack(coord[::-1], dim=1).reshape(image.size(1), image.size(2), 2)
    coord = rotate_point(coord, angel, center)
    dists = []
    for gt_coord in gt_coords:
        gt_coord = rotate_point(gt_coord, angel, center)
        dist = []
        for point in gt_coord:
            # Euclidean distance in physical units: pixel offset scaled by spacing.
            dist.append((((coord - point) * spacing) ** 2).sum(dim=-1).sqrt())
        dist = torch.stack(dist, dim=0)
        dists.append(dist)
    if len(dists) == 1:
        return dists[0]
    else:
        return dists


def gen_mask(coord: torch.Tensor):
    """Boolean mask over coordinates: True where at least one of the first two
    components (x, y) differs from PADDING_VALUE, i.e. the entry is not padding."""
    xy = coord[..., 0:2]
    return (xy != PADDING_VALUE).any(dim=-1)
