#!/usr/bin/env python3
"""
RGB point cloud generation utilities for the data processing pipeline.

This module provides a function to generate point clouds from RGB and depth images
using camera intrinsics and extrinsics.

Automatic Workflow:
===================
If the required colour *indices* for the current embodiment are **not** yet
present in ``allowed_seg_colors.json`` the code will automatically create an
annotated segmentation image so you can inspect the colours, then exit.  After
you add the desired indices to the JSON file, rerunning the script will load
those indices, convert them to concrete RGB triplets, and continue with RGB
point-cloud generation without further user intervention.

"""
import numpy as np
import logging
import warnings
import json
import sys
import os  # Needed for file existence checks
from typing import Dict, Any, Optional, List
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import torch
warnings.filterwarnings('ignore')
from unproject import create_pointcloud_from_depth_and_colors, TARGET_CAMERAS
from pathlib import Path
from loadJson import _load_seg_json, _load_emb_name_map, _save_seg_json
from time_it import timeit
# Process-wide cache of the unique segmentation colours and the segmentation
# image they were extracted from.  Filled lazily by the first RGBPCD instance
# and reused by all later ones (see RGBPCD.__init__).
UNIQUE_COLORS = None
SEG_IMG = None
class RGBPCD:
    """Build RGB point clouds from a unified multi-camera data dict.

    Resolves which segmentation colours belong to the current embodiment
    (backed by ``allowed_seg_colors.json`` via the ``loadJson`` helpers),
    converts them into per-camera boolean pixel masks, and delegates the
    actual unprojection to ``create_pointcloud_from_depth_and_colors``.
    """
    @timeit()
    def __init__(self, unifiedData: dict[str, np.ndarray],  embodiment, surpress_print,color_key,output_key,seg_source):
        """Store configuration and resolve the segmentation colour table.

        Args:
            unifiedData: data dict; must contain a 'cameras' mapping of
                per-camera sub-dicts (each may hold a 'segmentation' image).
            embodiment: embodiment selector string ('n', None, or a coded
                string — see get_mask_dict for the encoding).
            surpress_print: when truthy, silences diagnostic prints.
            color_key: key of the colour image handed to the point-cloud
                builder.
            output_key: key under which the point cloud is stored.
            seg_source: which segmentation JSON to load (passed to
                _load_seg_json / _save_seg_json).
        """
        self.unifiedData = unifiedData
        # Depth-handling parameters; presumably depth is in millimetres
        # (0.001 scale -> metres) -- TODO confirm against the depth source.
        self.depth_scale = 0.001
        self.min_depth = 0.1
        self.embodiment = embodiment
        self.surpress_print = surpress_print
        # Camera whose segmentation image defines the unique-colour table.
        self.camera_for_seg = 'midBack_camera'
        # Module-level cache: computed once per process, shared afterwards.
        # NOTE(review): the cache is never invalidated, so every later
        # instance reuses the colours of the FIRST unifiedData seen --
        # confirm this is intended when processing heterogeneous episodes.
        global UNIQUE_COLORS, SEG_IMG
        if UNIQUE_COLORS is None or SEG_IMG is None:
            self.unique_colors, self.seg_img = self.get_unique_colors(self.unifiedData, self.camera_for_seg)
            UNIQUE_COLORS = self.unique_colors
            SEG_IMG = self.seg_img
        else:
            self.unique_colors = UNIQUE_COLORS
            self.seg_img = SEG_IMG
        # PRINTED is only referenced from commented-out code below; kept
        # for compatibility.
        self.PRINTED=False
        self.seg_source = seg_source
        self.embNameMap = _load_emb_name_map()
        self.seg_json = _load_seg_json(seg_source)
        self.color_key=color_key
        self.output_key=output_key
        if not self.surpress_print:
            print("seg_json",self.seg_json)
            print("embNameMap",self.embNameMap)
    def _save_seg_json(self):
        # Persist the (possibly updated) colour table back to its JSON file.
        _save_seg_json(self.seg_json, self.seg_source)
    @timeit()
    def get_unique_colors(self, unifiedData, camera_name):
        """Return (unique_colors, seg_img) for one camera's segmentation.

        ``unique_colors`` is an (N, C) array of the distinct pixel values
        in the segmentation image, ordered lexicographically by channel;
        its row index is the "index" users record in the JSON file.
        Returns ([], None) when the camera has no segmentation image.
        """
        seg_img = unifiedData['cameras'][camera_name].get('segmentation', None)
        if seg_img is None:
            print(f"No segmentation image found for camera {camera_name}.")
            return [], None
        # Collapse H x W x C into a flat list of pixels, then deduplicate.
        flattened_colors = seg_img.reshape(-1, seg_img.shape[-1])
        unique_colors = np.unique(flattened_colors, axis=0)
        # lexsort sorts by its LAST key first, so this orders rows by
        # channel 0, then 1, then 2 (np.unique already returns sorted rows;
        # this re-sort is kept for explicitness).
        sorted_indices = np.lexsort((unique_colors[:, 2], unique_colors[:, 1], unique_colors[:, 0]))
        unique_colors = unique_colors[sorted_indices]
        return unique_colors, seg_img
    def get_seg_json_field(self, emb_key, field_key, no_exit=False):
        """Resolve the colour list stored under seg_json[emb_key][field_key].

        Resolution order:
          1. If the entry already holds 'colors', return them as-is.
          2. Else, if it holds 'indices', map each index into
             self.unique_colors, persist the resulting 'colors' back to the
             JSON file, and return them.  If none of the indices are valid,
             exit with status 1 (or return [] when no_exit is True).
          3. Else, save an annotated segmentation image so a human can pick
             indices, then exit with status 0 (or return [] when no_exit).
        """
        if not self.surpress_print:
            print("get_seg_json_field",emb_key,field_key,no_exit)
        # Defensive lookups: treat any non-dict entry as absent.
        emb_entry = self.seg_json.get(emb_key, {})
        if not isinstance(emb_entry, dict):
            emb_entry = {}
        field_entry = emb_entry.get(field_key, {})
        if not isinstance(field_entry, dict):
            field_entry = {}
        indices: list[int] = field_entry.get('indices', [])
        colors: list[list[int]] = field_entry.get('colors', []) or []
        if not self.surpress_print:
            print("get_seg_json_field with indices and colors",indices,colors)
        if colors:
            # if not self.PRINTED:
            #     print(f"Loaded {len(colors)} pre-computed colors for {emb_key}/{field_key}.")
            #     self.PRINTED = True
            return colors
        # If indices present, extract colors from seg img, update JSON, return
        if indices:
            selected_colors: list[list[int]] = []
            for idx in indices:
                # Out-of-range indices are skipped with a warning, not fatal.
                if 0 <= idx < len(self.unique_colors):
                    selected_colors.append(self.unique_colors[idx].tolist())
                else:
                    print(f"Warning: index {idx} is out of range for {emb_key}/{field_key} and will be ignored.")
            if not selected_colors:
                print(f"Error: none of the provided indices matched valid colours for {emb_key}/{field_key} – aborting.")
                if not no_exit:
                    sys.exit(1)
                return []
            # Persist colors to JSON
            if not isinstance(self.seg_json, dict):
                raise RuntimeError("seg_json must be a dict")
            if emb_key not in self.seg_json or not isinstance(self.seg_json[emb_key], dict):
                self.seg_json[emb_key] = {}
            if field_key not in self.seg_json[emb_key] or not isinstance(self.seg_json[emb_key][field_key], dict):
                self.seg_json[emb_key][field_key] = {} # type: ignore
            self.seg_json[emb_key][field_key]['colors'] = selected_colors # type: ignore
            self._save_seg_json()
            if not self.surpress_print:
                print(f"Wrote {len(selected_colors)} colors for {emb_key}/{field_key} to allowed_seg_colors.json.")
            return selected_colors

        # Neither colors nor indices: enter the manual-annotation workflow
        # described in the module docstring.
        print(f"No indices found for {emb_key}/{field_key}. Generating annotated segmentation image …")

        self.plot_seg(self.unifiedData, camera_name=self.camera_for_seg, embodiment=f"{emb_key}_{field_key}")
        print(f"Please add the desired index numbers to allowed_seg_colors.json for {emb_key}/{field_key} and rerun.")
        if no_exit:
            return []
        sys.exit(0)

    def plot_seg(self, unifiedData, camera_name, embodiment):
        """
        Plot the segmentation image for a given camera and save it with annotations of unique colors and their RGB values.

        Each unique colour gets a numbered marker at its pixel centroid on
        the image plus a colour swatch in a sidebar legend; the number is
        the index to record in allowed_seg_colors.json.

        Args:
            unifiedData: unused here (self.seg_img is plotted instead);
                kept for signature compatibility -- NOTE(review).
            camera_name: which camera to plot (used in title/filename only)
            embodiment: embodiment name for the output filename
        Returns:
            self.unique_colors, unchanged.
        """
        print(f"DEBUG: seg_img shape: {self.seg_img.shape}, dtype: {self.seg_img.dtype}, min: {self.seg_img.min()}, max: {self.seg_img.max()}")

        fig = plt.figure(figsize=(16, 10))
        
        # Main image occupies the left 3/4 of the figure.
        ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3)
        img_plot = ax1.imshow(self.seg_img)
        ax1.set_title(f"Segmentation for {camera_name}", fontsize=14, fontweight='bold')
        ax1.set_xlabel('X (pixels)')
        ax1.set_ylabel('Y (pixels)')
        
        # Sidebar subplot (right side)
        ax2 = plt.subplot2grid((1, 4), (0, 3))
        ax2.set_xlim(0, 1)
        ax2.set_ylim(0, len(self.unique_colors) + 1)
        ax2.set_title("Color Labels", fontsize=12, fontweight='bold')
        ax2.axis('off')
        # Pass 1: numbered marker at each colour's pixel centroid.
        for i, color in enumerate(self.unique_colors):
            color_mask = np.all(self.seg_img == color, axis=2)
            if np.any(color_mask):
                y_coords, x_coords = np.where(color_mask)
                center_x = float(np.mean(x_coords))
                center_y = float(np.mean(y_coords))
                norm_color = color / 255.0
                ax1.plot(center_x, center_y, 'o', markersize=12, markeredgewidth=2, 
                    markeredgecolor='white', markerfacecolor=norm_color)
                ax1.text(center_x +3, center_y, str(i), 
                        fontsize=12, color='white', weight='bold',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor='black', alpha=0.7))
        # Pass 2: swatch + "index: (R,G,B)" label in the sidebar, listed
        # top-down in index order.
        for i, color in enumerate(self.unique_colors):
            norm_color = color / 255.0
            label_y = len(self.unique_colors) - i - 0.5
            rect = patches.Rectangle((0.1, label_y - 0.3), 0.2, 0.6,facecolor=norm_color, edgecolor='black', linewidth=1)
            ax2.add_patch(rect)
            rgb_text = f"{i}: [ ] ({color[0]},{color[1]},{color[2]})"
            ax2.text(0.35, label_y, rgb_text, 
                    fontsize=10, verticalalignment='center')
        
        plt.tight_layout()
        
        output_filename = f"segmentation_{camera_name}_{embodiment}_annotated.png"
        plt.savefig(output_filename, dpi=300, bbox_inches='tight')
        print(f"Segmentation image saved as: {os.path.abspath(output_filename)}")
        plt.close()  
        return self.unique_colors
    @timeit()
    def seg_to_mask(self, desired_colors: list[list[int]]) -> dict[str, np.ndarray]:
        """Build per-camera boolean masks of pixels matching any desired colour.

        'head_camera' is always skipped; cameras without a segmentation
        image are omitted from the result.
        """
        masks: dict[str, np.ndarray] = {}
        if 'cameras' not in self.unifiedData or not self.unifiedData['cameras']:
            print("seg_to_mask: no camera data available")
            return masks
        # Deduplicate and normalise the colours to int tuples up front.
        desired_set = {tuple(map(int, c)) for c in desired_colors}
        cams_field = self.unifiedData.get('cameras')
        cameras_dict = cams_field if isinstance(cams_field, dict) else {}
        for cam_name, cam_data in cameras_dict.items():
            if cam_name=="head_camera":
                continue
            seg_img = cam_data.get('segmentation')
            if seg_img is None or seg_img.size == 0:
                continue
            # OR together one exact-match mask per desired colour.
            mask = np.zeros(seg_img.shape[:2], dtype=bool)
            for col in desired_set:
                col_arr = np.array(col, dtype=seg_img.dtype).reshape(1, 1, 3)
                match = np.all(seg_img == col_arr, axis=2)
                mask |= match
            masks[cam_name] = mask
        return masks
    @timeit()
    def get_mask_dict(self, embodiment: str):
        """Return per-camera boolean masks for the requested embodiment.

        Embodiment encoding, as used below:
          * embodiment[0]: embodiment code, mapped to a JSON key via
            self.embNameMap.
          * embodiment[1]: gripper flag; '1' (or any string longer than 2)
            merges the env and gripper masks, otherwise only env is used.
            NOTE(review): a 1-character embodiment string would raise
            IndexError here -- confirm callers always pass >= 2 chars.
          * 'n': use every unique colour except pure black [0, 0, 0].
        Any other value (e.g. None) yields an empty dict.

        Side effect (coded-string branch only): stores the total env /
        gripper mask pixel counts in unifiedData['ec'] and ['gc'].
        """
        allowed_seg_colors = []
        mask_dict={}
        @timeit()
        def merge_mask(Adict,Bdict):
            # In-place union: OR each of Bdict's masks into Adict.
            for key, value in Bdict.items():
                if key not in Adict:
                    Adict[key] = value
                else:
                    Adict[key] |= value
            return Adict
        if embodiment is not None and isinstance(embodiment, str) and embodiment != 'n':
            emb_code, gripper_flag = embodiment[0], embodiment[1]
            emb_key = self.embNameMap.get(emb_code, emb_code)
            # env colours may be missing when the gripper flag is set
            # (no_exit=True); gripper colours are always required.
            env_colors = self.get_seg_json_field(emb_key, 'env', no_exit=True if gripper_flag=='1' else False)
            gripper_colors = self.get_seg_json_field(emb_key, 'gripper', no_exit=False)
            envmask = self.seg_to_mask(env_colors)
            grippermask = self.seg_to_mask(gripper_colors)
            # Record total matched-pixel counts for downstream diagnostics.
            self.unifiedData['ec'] = sum(mask.sum() for mask in envmask.values())
            self.unifiedData['gc'] = sum(mask.sum() for mask in grippermask.values())
            if (gripper_flag=='1' and len(embodiment)==2) or (len(embodiment)>2):
                allowed_seg_colors = env_colors+gripper_colors
                mask_dict=merge_mask(envmask,grippermask)
            else:
                allowed_seg_colors = env_colors
                mask_dict=envmask
        elif  embodiment == 'n':
            # 'n' = no embodiment filtering: everything except black.
            allowed_seg_colors = [color.tolist() for color in self.unique_colors if not np.array_equal(color, [0, 0, 0])]
            mask_dict=self.seg_to_mask(allowed_seg_colors)
        # Diagnostic summary of the masks produced.
        cnt=0
        for key,val in mask_dict.items():
            cnt+=val.sum()
            if not self.surpress_print:
                print("mask_dict",key,val.shape)
        if not self.surpress_print:
            print("cnt",cnt)
        return mask_dict
    @timeit()
    def rgbPcd(
        self,
    ) -> dict[str, np.ndarray]:
        """Compute masks for the configured embodiment, then unproject them
        into an RGB point cloud via create_pointcloud_from_depth_and_colors.
        """
        mask_dict = self.get_mask_dict(self.embodiment)
        # print("mask_dict midBack_camera shape",mask_dict["midBack_camera"].shape)
        result = create_pointcloud_from_depth_and_colors(
            unifiedData=self.unifiedData,
            color_key=self.color_key,
            output_key=self.output_key,
            mask_pixels=mask_dict,
            surpress_print=self.surpress_print,
        )
        # print("result pointcloud shape",result["repointDcloud"].shape)
        return result

@timeit()
def rgbPcd(unifiedData,embodiment,surpress_print,color_key,output_key,seg_source="traverse"):
    """Convenience wrapper: build an RGBPCD instance and return its point cloud.

    Args:
        unifiedData: unified multi-camera data dict (see RGBPCD.__init__).
        embodiment: embodiment selector string ('n', None, or coded string).
        surpress_print: when truthy, silences diagnostic prints.
        color_key: key of the colour image used for the point cloud.
        output_key: key under which the point cloud is stored.
        seg_source: which segmentation JSON to load (default "traverse").

    Returns:
        The result dict from RGBPCD.rgbPcd().
    """
    if not surpress_print:
        print("rgbPcd",embodiment,surpress_print)
    # Fixed: the local previously shadowed this function's own name (rgbPcd),
    # which would break any recursive/repeated reference and hurts readability.
    generator = RGBPCD(
        unifiedData,
        embodiment=embodiment,
        surpress_print=surpress_print,
        color_key=color_key,
        output_key=output_key,
        seg_source=seg_source,
    )
    return generator.rgbPcd()