#!/usr/bin/env python3
"""
RGB point cloud generation utilities for the data processing pipeline.

This module provides a function to generate point clouds from RGB and depth images
using camera intrinsics and extrinsics.

Automatic Workflow:
===================
If the required colour *indices* for the current mask_param are **not** yet
present in ``allowed_seg_colors.json`` the code will automatically create an
annotated segmentation image so you can inspect the colours, then exit.  After
you add the desired indices to the JSON file, rerunning the script will load
those indices, convert them to concrete RGB triplets, and continue with RGB
point-cloud generation without further user intervention.

"""
import numpy as np
import logging
import warnings
import json
import sys
import os  # Needed for file existence checks
import copy
from typing import Dict, Any, Optional, List
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import torch
from copy import deepcopy
warnings.filterwarnings('ignore')
from unproject import create_pointcloud_from_depth_and_colors,test_output_dir,CUSTOM_NAME
from pathlib import Path
from loadJson import _load_seg_json, _load_emb_name_map, _save_seg_json
from time_it import timeit
from injPcd import injPcd
sys.path.append(str(Path(__file__).parent.parent / "debugger"))
from save_file import save_info
from cameraFilter import camera_mapping

# Process-wide caches shared by every RGBPCD instance (populated lazily by
# the first __init__ call; see RGBPCD.__init__ / get_unique_colors):
UNIQUE_COLORS = None  # unique segmentation RGB triplets, lexsorted by (R, G, B)
SEG_IMG = None  # the reference segmentation image the colors were extracted from
CNT=0  # monotonically increasing counter used to name saved RGBD dump folders

def interpret_camera_name(short_name: str) -> str:
    """Expand a short camera code into its long view name.

    Per-character rules:
      * 'R' -> "real" (prefixed with '_' unless it is the first character)
      * 'm' -> "mid" (prefixed with '_' unless it is the first character)
      * anything else is looked up in ``camera_mapping``; when it directly
        follows an 'm' the mapped name is appended verbatim, otherwise it is
        lower-cased (and '_'-prefixed when not first).
    """
    parts: list[str] = []
    for pos, ch in enumerate(short_name):
        if ch == 'R':
            parts.append("real" if pos == 0 else "_real")
        elif ch == 'm':
            parts.append("mid" if pos == 0 else "_mid")
        elif pos > 0 and short_name[pos - 1] == 'm':
            # Right after an 'm' the mapped name keeps its case, no separator.
            parts.append(camera_mapping[ch])
        elif pos > 0:
            parts.append("_" + camera_mapping[ch].lower())
        else:
            parts.append(camera_mapping[ch].lower())
        # if pos == len(short_name) - 1: parts.append("_camera")
    return "".join(parts)

class RGBPCD:
    """Build masked RGB point clouds from the cameras in ``unifiedData``.

    ``param`` has the form ``"<mask_param>[_<camera_code>]"``:

    * ``mask_param[0]`` — embodiment code, resolved via the embodiment name map;
    * ``mask_param[1]`` — "omni" code ('n' selects every non-black colour);
    * digit characters at positions 1/2/3 of ``mask_param`` — the
      visualize / deploy / calibrate flags, each a bitmask over the parts
      {env: 0b001, gripper: 0b010, arm: 0b100};
    * ``camera_code`` — short camera name expanded by interpret_camera_name()
      (defaults to 'mb' when absent).

    Colour lookups come from ``allowed_seg_colors`` JSON files; when the
    required entries are missing, an annotated segmentation image is written
    for manual inspection and the process exits (see get_seg_json_field).
    """
    @timeit()
    def __init__(self, unifiedData: dict[str, np.ndarray],  param, surpress_print,color_key,output_key,seg_source):
        # NOTE: the misspelling "surpress_print" is part of the public
        # interface used by callers and is therefore kept.
        self.unifiedData = unifiedData
        self.depth_scale = 0.001  # NOTE(review): unused in this file — presumably mm->m, confirm
        self.min_depth = 0.1  # NOTE(review): unused in this file — confirm intent
        self.param = param
        # First underscore-token selects the mask configuration; the optional
        # second token is the short camera code.
        self.mask_param = param.split('_')[0]
        if len(self.param.split('_'))>1:
            self.cam_firstname_short= self.param.split('_')[1]
        else:
            self.cam_firstname_short=  'mb'
        self.surpress_print = surpress_print
        # Camera whose segmentation image defines the canonical colour set.
        self.camera_for_seg = 'midBack_camera'
        self.cam_refreal_view = interpret_camera_name(self.cam_firstname_short)
        # if self.camera_for_seg not in unifiedData['cameras']:
        #     self.camera_for_seg = 'midBack_real_camera'
        # Cache the unique-colour extraction across instances in this process.
        global UNIQUE_COLORS, SEG_IMG
        if UNIQUE_COLORS is None or SEG_IMG is None:
            self.unique_colors, self.seg_img = self.get_unique_colors(self.unifiedData, self.camera_for_seg)
            UNIQUE_COLORS = self.unique_colors
            SEG_IMG = self.seg_img
        else:
            self.unique_colors = UNIQUE_COLORS
            self.seg_img = SEG_IMG
        self.PRINTED=False
        self.seg_source = seg_source
        self.embNameMap = _load_emb_name_map()
        assert 'task_name' in unifiedData, "unifiedData must contain task_name field"
        task_name = unifiedData['task_name']
        self.seg_json = _load_seg_json(seg_source, task_name)
        self.color_key=color_key
        self.output_key=output_key
        if not self.surpress_print:
            print("seg_json",self.seg_json)
            print("embNameMap",self.embNameMap)
        print("mask_param",self.mask_param)
        # Decode mask_param: char 0 = embodiment code, char 1 = omni code;
        # digits (if any) at positions 1..3 are the vis/deploy/calib bitmasks.
        self.emb_code = self.get_char(self.mask_param,0)
        self.omni_code = self.get_char(self.mask_param,1)
        self.vis_flag = self.get_num_or_0(self.mask_param,1)
        self.deploy_flag = self.get_num_or_0(self.mask_param,2)
        self.calib_flag = self.get_num_or_0(self.mask_param,3)
    def get_num_or_0(self,string,position):
        """Return ``int(string[position])`` when it is a digit, else 0 (also 0 for out-of-range positions)."""
        if len(string)<=position or position<0:
            return 0
        if string[position].isdigit():
            return int(string[position])
        return 0
    def get_char(self,string,position):
        """Return ``string[position]``, or '' when the position is out of range."""
        if len(string)<=position or position<0:
            return ''
        return string[position]
    def _save_seg_json(self):
        # Persist the (possibly updated) colour JSON for this task/source.
        _save_seg_json(self.seg_json, self.seg_source, self.unifiedData['task_name'])
    @timeit()
    def get_unique_colors(self, unifiedData, camera_name):
        """Return ``(unique_colors, seg_img)`` for ``camera_name``.

        ``unique_colors`` are the distinct RGB triplets of the camera's
        segmentation image, sorted lexicographically by (R, G, B).
        Returns ``([], None)`` when the camera has no segmentation image.
        """
        seg_img = unifiedData['cameras'][camera_name].get('segmentation', None)
        if seg_img is None:
            print(f"No segmentation image found for camera {camera_name}.")
            return [], None
        flattened_colors = seg_img.reshape(-1, seg_img.shape[-1])
        unique_colors = np.unique(flattened_colors, axis=0)
        # np.lexsort treats the LAST key as primary: sort by R, then G, then B.
        sorted_indices = np.lexsort((unique_colors[:, 2], unique_colors[:, 1], unique_colors[:, 0]))
        unique_colors = unique_colors[sorted_indices]
        return unique_colors, seg_img
    def get_seg_json_field(self, emb_key, field_key, no_exit=False):
        """Resolve the RGB colours for ``emb_key``/``field_key``.

        Resolution order:
        1. pre-computed ``colors`` in the JSON — returned as-is;
        2. ``indices`` into ``self.unique_colors`` — converted to colours,
           written back to the JSON, then returned;
        3. neither present — write an annotated segmentation image for manual
           inspection, then ``sys.exit(0)`` (or return ``[]`` when ``no_exit``).
        """
        if not self.surpress_print:
            print("get_seg_json_field",emb_key,field_key,no_exit)
        # Defensively coerce missing/malformed JSON levels to empty dicts.
        emb_entry = self.seg_json.get(emb_key, {})
        if not isinstance(emb_entry, dict):
            emb_entry = {}
        field_entry = emb_entry.get(field_key, {})
        if not isinstance(field_entry, dict):
            field_entry = {}
        indices: list[int] = field_entry.get('indices', [])
        colors: list[list[int]] = field_entry.get('colors', []) or []
        if not self.surpress_print:
            print("get_seg_json_field with indices and colors",indices,colors)
        if colors:
            # if not self.PRINTED:
            #     print(f"Loaded {len(colors)} pre-computed colors for {emb_key}/{field_key}.")
            #     self.PRINTED = True
            return colors
        # If indices present, extract colors from seg img, update JSON, return
        if indices:
            selected_colors: list[list[int]] = []
            for idx in indices:
                if 0 <= idx < len(self.unique_colors):
                    selected_colors.append(self.unique_colors[idx].tolist())
                else:
                    print(f"Warning: index {idx} is out of range for {emb_key}/{field_key} and will be ignored.")
            if not selected_colors:
                print(f"Error: none of the provided indices matched valid colours for {emb_key}/{field_key} – aborting.")
                if not no_exit:
                    sys.exit(1)
                return []
            # Persist colors to JSON
            if not isinstance(self.seg_json, dict):
                raise RuntimeError("seg_json must be a dict")
            if emb_key not in self.seg_json or not isinstance(self.seg_json[emb_key], dict):
                self.seg_json[emb_key] = {}
            if field_key not in self.seg_json[emb_key] or not isinstance(self.seg_json[emb_key][field_key], dict):
                self.seg_json[emb_key][field_key] = {} # type: ignore
            self.seg_json[emb_key][field_key]['colors'] = selected_colors # type: ignore
            self._save_seg_json()
            if not self.surpress_print:
                print(f"Wrote {len(selected_colors)} colors for {emb_key}/{field_key} to allowed_seg_colors.json.")
            return selected_colors

        print(f"No indices found for {emb_key}/{field_key}. Generating annotated segmentation image …")

        self.plot_seg( camera_name=self.camera_for_seg, embodiment=f"{emb_key}_{field_key}")
        print(f"Please add the desired index numbers to {Path(__file__).parent}/allowed_seg_json/{self.unifiedData['task_name']}/allowed_seg_colors-{self.seg_source}.json for {emb_key}/{field_key} and rerun.")
        if no_exit:
            return []
        sys.exit(0)

    def plot_seg(self, camera_name, embodiment):
        """
        Save an annotated copy of ``self.seg_img``: each unique colour is
        marked at its pixel centroid with its index, and a sidebar lists
        index -> RGB value so the user can fill in the JSON ``indices``.
        Args:
            camera_name: camera name, used for the plot title and filename
            embodiment: embodiment name for the output filename
        Returns:
            ``self.unique_colors`` (unchanged).
        """
        print(f"DEBUG: seg_img shape: {self.seg_img.shape}, dtype: {self.seg_img.dtype}, min: {self.seg_img.min()}, max: {self.seg_img.max()}")

        fig = plt.figure(figsize=(16, 10))

        # Main image takes 3 of 4 grid columns; sidebar takes the last one.
        ax1 = plt.subplot2grid((1, 4), (0, 0), colspan=3)
        img_plot = ax1.imshow(self.seg_img)
        ax1.set_title(f"Segmentation for {camera_name}", fontsize=14, fontweight='bold')
        ax1.set_xlabel('X (pixels)')
        ax1.set_ylabel('Y (pixels)')

        # Sidebar subplot (right side)
        ax2 = plt.subplot2grid((1, 4), (0, 3))
        ax2.set_xlim(0, 1)
        ax2.set_ylim(0, len(self.unique_colors) + 1)
        ax2.set_title("Color Labels", fontsize=12, fontweight='bold')
        ax2.axis('off')
        # Mark each colour's centroid on the image with its index number.
        for i, color in enumerate(self.unique_colors):
            color_mask = np.all(self.seg_img == color, axis=2)
            if np.any(color_mask):
                y_coords, x_coords = np.where(color_mask)
                center_x = float(np.mean(x_coords))
                center_y = float(np.mean(y_coords))
                norm_color = color / 255.0
                ax1.plot(center_x, center_y, 'o', markersize=12, markeredgewidth=2, 
                    markeredgecolor='white', markerfacecolor=norm_color)
                ax1.text(center_x +3, center_y, str(i), 
                        fontsize=12, color='white', weight='bold',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor='black', alpha=0.7))
        # Sidebar legend: one swatch + "index: ( ) (R,G,B)" row per colour.
        for i, color in enumerate(self.unique_colors):
            norm_color = color / 255.0
            label_y = len(self.unique_colors) - i - 0.5
            rect = patches.Rectangle((0.1, label_y - 0.3), 0.2, 0.6,facecolor=norm_color, edgecolor='black', linewidth=1)
            ax2.add_patch(rect)
            rgb_text = f"{i}: [ ] ({color[0]},{color[1]},{color[2]})"
            ax2.text(0.35, label_y, rgb_text, 
                    fontsize=10, verticalalignment='center')

        plt.tight_layout()

        output_dir = Path(__file__).parent / "seg_imgs" / self.unifiedData['task_name'] / self.seg_source / ""
        os.makedirs(output_dir, exist_ok=True)
        output_filename = f"segmentation_{camera_name}_{embodiment}_annotated.png"
        output_path = output_dir / output_filename
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Segmentation image saved as: {output_path}")
        plt.close()  
        return self.unique_colors
    def dict_flag_merge_mask(self,seg_dict,flag):
        """Collect the colours and OR-merged per-camera masks of every part
        of ``seg_dict`` whose bit is set in ``flag``."""
        colors_list = []
        mask_list = []
        seg_config = {'env': 0b001, 'gripper': 0b010, 'arm': 0b100}
        for part_name, part_data in seg_dict.items():
            if (flag & seg_config[part_name]) == seg_config[part_name]:
                # print("part_name",part_name,'flag',flag,'ref flag',seg_config[part_name])
                colors,mask = part_data["colors"],part_data["mask"]
                colors_list.extend(colors)
                mask_list.append(mask)
        mask_dict = self.merge_mask_list(mask_list)
        return colors_list, mask_dict

    def merge_mask_list(self, mask_list):
        """Fold a list of {camera: bool-mask} dicts into one via merge_mask."""
        mask_dict = {}
        for mask in mask_list:
            mask_dict = self.merge_mask(mask_dict, mask)
        return mask_dict

    def merge_mask(self, Adict, Bdict):
        """OR Bdict's masks into Adict per camera key (Adict is mutated and returned)."""
        for key, value in Bdict.items():
            if key not in Adict:
                Adict[key] = value
            else:
                Adict[key] |= value
        return Adict

    def get_seg(self,emb_key,part_name):
        """Resolve the part's colours, build per-camera masks, and record the
        total masked-pixel count in unifiedData under '<first letter of part>c'."""
        colors = self.get_seg_json_field(emb_key, part_name)
        mask = self.seg_to_mask(colors)
        # NOTE: the generator variable shadows the outer `mask` dict here.
        self.unifiedData[str(part_name[0] if len(part_name)>0 else '')+'c'] = sum(mask.sum() for mask in mask.values())
        return colors, mask

    def mask_to_rgbd_npy(self,camera_key,uni_key):
        """Dump the camera's depth, rgb and stacked rgbd arrays to disk.

        When ``uni_key`` is non-empty, depth is zeroed outside the boolean
        mask stored at ``unifiedData[uni_key]``. Each call writes into a new
        folder named by the global counter CNT, which is then incremented.
        """
        global CNT
        if camera_key not in self.unifiedData['cameras']:
            print(f"mask_to_rgbd_npy: camera {camera_key} not in unifiedData")
            return
        rgb =self.unifiedData['cameras'][camera_key]['rgb'].copy()
        # print(f"mask_to_rgbd_npyrgb {camera_key} {uni_key}",rgb.shape)
        depth =self.unifiedData['cameras'][camera_key]['depth'].copy()
        # print(f"mask_to_rgbd_npydepth {camera_key} {uni_key}",depth.shape)
        if uni_key !="":
            mask = self.unifiedData[uni_key].copy()
            depth [~mask] = 0
        depth_expanded = np.expand_dims(depth,axis=-1)
        rgbd = np.concatenate([rgb,depth_expanded],axis=-1)
        save_dir = Path(__file__).parent.parent / "inject_pcd" / "adjust" / CUSTOM_NAME / uni_key / f"{CNT}"
        os.makedirs(save_dir, exist_ok=True)
        save_info("depth", save_dir / camera_key , depth)
        save_info("img", save_dir / camera_key, rgb)
        save_info("rgbd",save_dir / camera_key, rgbd)
        CNT+=1
    def mask_to_unifiedData(self,seg_dict,flag,camera_key,uni_key,no_save=False):
        """Merge the flagged parts' masks, store ``camera_key``'s mask in
        unifiedData[uni_key], and optionally dump the masked RGBD arrays."""
        colors,mask = self.dict_flag_merge_mask(seg_dict,flag)
        assert camera_key in mask, f"{camera_key} not in mask, {mask.keys()}"
        self.unifiedData[uni_key] = mask[camera_key].copy()
        if not no_save:
            self.mask_to_rgbd_npy(camera_key,uni_key)
        return colors,mask
    @staticmethod
    def seg_to_mask_static(unifiedData: dict[str, np.ndarray],desired_colors: list[list[int]],single_cam_field=None) -> dict[str, np.ndarray]:
        """Build a {camera: bool HxW mask} dict marking pixels whose
        segmentation colour is in ``desired_colors``.

        ``single_cam_field`` restricts processing to one camera when given.
        Cameras without a segmentation image are skipped.
        """
        masks: dict[str, np.ndarray] = {}
        if 'cameras' not in unifiedData or not unifiedData['cameras']:
            print("seg_to_mask: no camera data available")
            return masks
        desired_set = {tuple(map(int, c)) for c in desired_colors}
        cams_field = unifiedData.get('cameras')
        cameras_dict = cams_field if isinstance(cams_field, dict) else {}
        for cam_name, cam_data in cameras_dict.items():
            # if cam_name=="head_camera":
            #     continue
            if single_cam_field and cam_name != single_cam_field:
                continue
            seg_img = cam_data.get('segmentation')
            if seg_img is None or seg_img.size == 0:
                continue
            mask = np.zeros(seg_img.shape[:2], dtype=bool)
            # OR together one exact-match mask per desired colour.
            for col in desired_set:
                col_arr = np.array(col, dtype=seg_img.dtype).reshape(1, 1, 3)
                match = np.all(seg_img == col_arr, axis=2)
                mask |= match
            masks[cam_name] = mask
        return masks
    @timeit()
    def seg_to_mask(self, desired_colors: list[list[int]]) -> dict[str, np.ndarray]:
        """Instance shorthand for seg_to_mask_static over self.unifiedData."""
        return RGBPCD.seg_to_mask_static(self.unifiedData,desired_colors)
    @timeit()
    def get_mask_dict(self):
        """Build the visualize/deploy/calib masks requested by mask_param.

        Returns the visualisation mask dict ({camera: bool mask}); the deploy
        and calib masks are stored into unifiedData as a side effect.
        """
        allowed_seg_colors = []
        vis_mask_dict={}
        # if self.emb_code == 'n':
        #     allowed_seg_colors = [color.tolist() for color in self.unique_colors if not np.array_equal(color, [0, 0, 0])]
        #     vis_mask_dict=self.seg_to_mask(allowed_seg_colors)
        # else:
        if self.emb_code == 'n':
            print("legacy pipeline detected, please update pcdn or segn to pcd%n or seg%n")
            exit(0)
        emb_key = self.embNameMap.get(self.emb_code, self.emb_code)
        seg_config = {'env': 0b001, 'gripper': 0b010, 'arm': 0b100}
        seg_dict={}
        # Union of all flags decides which parts' colours/masks must be built.
        master_flag=self.vis_flag | self.deploy_flag | self.calib_flag
        print("master_flag",master_flag,"vis_flag",self.vis_flag,"deploy_flag",self.deploy_flag,"calib_flag",self.calib_flag)
        for part_name, bit_flag in seg_config.items():
            if (master_flag & bit_flag) == bit_flag:
                colors, mask = self.get_seg(emb_key,part_name)
                seg_dict[part_name] = {"colors":colors,"mask":mask}
        if self.omni_code == 'n':
            # Omni mode: visualise every colour except black background.
            allowed_seg_colors = [color.tolist() for color in self.unique_colors if not np.array_equal(color, [0, 0, 0])]
            # print("omni_code",self.omni_code,"allowed_seg_colors",allowed_seg_colors)
            vis_mask_dict=self.seg_to_mask(allowed_seg_colors)
            vis_mask_dict = deepcopy(vis_mask_dict)
        elif self.vis_flag:
            allowed_seg_colors,vis_mask_dict = self.mask_to_unifiedData(seg_dict,self.vis_flag,f'{self.cam_refreal_view}_camera','visualize_mask',no_save=True)
            vis_mask_dict = deepcopy(vis_mask_dict)
        if self.deploy_flag:
            self.mask_to_unifiedData(seg_dict,self.deploy_flag,f'{self.cam_refreal_view}_real_camera','deploy_mask')
        if self.calib_flag:
            self.mask_to_unifiedData(seg_dict,self.calib_flag,f'{self.cam_refreal_view}_camera','calib_mask')
            # Unmasked dump of the real camera for calibration reference.
            self.mask_to_rgbd_npy('real_camera','')

        # Debug: total number of True pixels across all visualisation masks.
        cnt=0
        for key,val in vis_mask_dict.items():  
            cnt+=val.sum()  
        print("cnt",cnt)
        return vis_mask_dict
    @timeit()
    def rgbPcd(
        self,
    ) -> dict[str, np.ndarray]:
        """Run the pipeline: build masks, generate the point cloud (when a
        visualisation mask is requested), and inject the deploy point cloud."""
        mask_dict = self.get_mask_dict()
        if self.vis_flag or self.omni_code =='n':
            self.unifiedData = create_pointcloud_from_depth_and_colors(
                unifiedData=self.unifiedData,
                color_key=self.color_key,
                output_key=self.output_key,
                mask_pixels=mask_dict,
                surpress_print=self.surpress_print,
            )
        if self.deploy_flag:
            injPcd(self.unifiedData)
        # print("result pointcloud shape",result["repointDcloud"].shape)
        return self.unifiedData

@timeit()
def rgbPcd(unifiedData,param,surpress_print,color_key,output_key,seg_source="traverse"):
    """Convenience entry point: build an RGBPCD and run its pipeline once.

    Returns the (mutated) ``unifiedData`` produced by RGBPCD.rgbPcd().
    """
    if not surpress_print:
        print("rgbPcd", param, surpress_print)
    builder = RGBPCD(
        unifiedData,
        param=param,
        surpress_print=surpress_print,
        color_key=color_key,
        output_key=output_key,
        seg_source=seg_source,
    )
    return builder.rgbPcd()