import numpy as np
import torch
import cv2
from utils.dataset_processing.grasp import Grasp
from utils.dataset_processing import image
from skimage.feature import peak_local_max


class CameraData:
    """
    Wrapper for raw camera frames: centre-crops them to a square
    network-input window and converts them to torch tensors, and maps
    network grasp predictions back onto the full frame for visualisation.
    """
    def __init__(self,
                 width=500,
                 height=300,
                 output_size=224,
                 include_depth=False,
                 include_rgb=True
                 ):
        """
        :param width: Raw camera frame width in pixels
        :param height: Raw camera frame height in pixels
        :param output_size: Image output size in pixels (square)
        :param include_depth: Whether depth image is included
        :param include_rgb: Whether RGB image is included
        :raises ValueError: if neither depth nor RGB is requested
        """
        self.output_size = output_size
        self.include_depth = include_depth
        self.include_rgb = include_rgb

        if not include_depth and not include_rgb:
            raise ValueError('At least one of Depth or RGB must be specified.')

        # Centre crop window; stored corners are in (row, col) order to match
        # image.Image.crop's expected arguments.
        left = (width - output_size) // 2
        top = (height - output_size) // 2
        right = (width + output_size) // 2
        bottom = (height + output_size) // 2

        self.bottom_right = (bottom, right)
        self.top_left = (top, left)

    @staticmethod
    def numpy_to_torch(s):
        """Convert a numpy array to a float32 torch tensor.

        A 2-D array gains a leading channel axis (H, W) -> (1, H, W);
        higher-rank arrays are converted unchanged.
        """
        if s.ndim == 2:
            return torch.from_numpy(np.expand_dims(s, 0).astype(np.float32))
        return torch.from_numpy(s.astype(np.float32))

    def get_depth(self, img):
        """Crop, normalise and channel-expand a depth frame.

        :param img: Raw depth frame (full camera resolution)
        :return: numpy array of shape (1, output_size, output_size)
        """
        depth_img = image.Image(img)
        depth_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
        depth_img.normalise()
        depth_img.img = np.expand_dims(depth_img.img, 0)
        return depth_img.img

    def get_rgb(self, img, norm=True):
        """Crop an RGB frame; optionally normalise and move channels first.

        :param img: Raw RGB frame (full camera resolution)
        :param norm: If True, normalise and transpose to CHW for the network;
                     if False, return the HWC crop (e.g. for drawing).
        :return: numpy array, CHW when norm=True else HWC
        """
        rgb_img = image.Image(img)
        rgb_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
        if norm:
            rgb_img.normalise()
            rgb_img.img = rgb_img.img.transpose((2, 0, 1))
        return rgb_img.img

    def get_data(self, rgb=None, depth=None):
        """Build the 4-channel network input tensor from the camera frames.

        :param rgb: Raw RGB frame (used when include_rgb is set)
        :param depth: Raw depth frame (used when include_depth is set)
        :return: (input tensor of shape (1, C, H, W), depth crop or None,
                  rgb crop or None)
        """
        depth_img = None
        rgb_img = None
        if self.include_depth:
            depth_img = self.get_depth(img=depth)
        if self.include_rgb:
            rgb_img = self.get_rgb(img=rgb)

        if self.include_depth and self.include_rgb:
            # depth is (1, H, W), rgb is (3, H, W); stack to (1, 4, H, W).
            x = self.numpy_to_torch(
                np.concatenate(
                    (np.expand_dims(depth_img, 0),
                     np.expand_dims(rgb_img, 0)),
                    1
                )
            )
        elif self.include_depth:
            x = self.numpy_to_torch(depth_img)
        else:
            # RGB only: pad with a zero depth channel so the network always
            # receives 4 channels.  Sized from the configured output_size
            # (was hard-coded to 224, which broke other crop sizes).
            filling = np.zeros((1, self.output_size, self.output_size))
            x = self.numpy_to_torch(
                np.concatenate(
                    (np.expand_dims(filling, 0),
                     np.expand_dims(rgb_img, 0)),
                    1
                )
            )
        return x, depth_img, rgb_img

    def get_marked_image(self, rgb=None, q_img=None, ang_img=None, width_img=None, no_grasps=1):
        """
        Detect grasps in a network output and draw them on the RGB frame.

        :param rgb: Full-resolution RGB image; drawn on in place
        :param q_img: Q image network output (crop-sized)
        :param ang_img: Angle image network output
        :param width_img: (optional) Width image network output
        :param no_grasps: Max number of grasps to return
        :return: (list of Grasps in full-frame coordinates, annotated image)
        """
        # peak_local_max returns an (N, 2) array of (row, col) peaks — it is
        # never None, so iterate it directly (empty array -> no grasps).
        local_max = peak_local_max(q_img, min_distance=10, threshold_abs=0.01, num_peaks=no_grasps)
        grasps = []
        # Local renamed from `image` so the imported `image` module is not shadowed.
        marked = rgb
        for grasp_point_array in local_max:
            grasp_point = tuple(grasp_point_array)
            # Map the crop-local peak back to full-frame (row, col) coordinates.
            centre = (self.top_left[0] + grasp_point[0],
                      self.top_left[1] + grasp_point[1])
            grasp_angle = ang_img[grasp_point]
            grasp_quality = q_img[grasp_point]
            g = Grasp(centre, grasp_angle, grasp_quality)
            if grasp_quality >= 1:
                label = "score:  1"
            else:
                label = "score:  " + str(round(grasp_quality, 2))
            if width_img is not None:
                g.length = width_img[grasp_point]
                g.width = g.length / 2
            # cv2 drawing takes (x, y) = (col, row) points.
            cv2.circle(marked, (centre[1], centre[0]), 2, (0, 0, 255))
            cv2.polylines(marked, [g.as_gr_mine()], True, (255, 0, 0))
            # NOTE(review): the label is drawn at a fixed position, so with
            # several grasps the score texts overwrite each other.
            cv2.putText(marked, label, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            grasps.append(g)
        return grasps, marked