import sys
sys.path.append("/home/fd_chen/in_context_RL/spoc-robot-training")
import debugpy
import os,copy
import json
import shutil
import time
import gc
import time
import random
import math
import h5py

from tqdm import tqdm
from multiprocessing import Pool

from collections import deque

from environment.stretch_controller import StretchController
from utils.data_generation_utils.navigation_utils import is_any_object_sufficiently_visible_and_in_center_frame
from utils.constants.stretch_initialization_utils import STRETCH_ENV_ARGS, AGENT_ROTATION_DEG, AGENT_MOVEMENT_CONSTANT
from utils.debug_utils import enable_remote_debug

from expert.actions import shortest_path_to_actions, plot_grid_map, determine_turn_actions_
from expert.functions import (l2_distance, 
                            load_procthor_houses,
                            save_data_json, 
                            check_agent_position,
                            save_img_from_frame,
                            positions2path,
                            save_navigation_frame,
                            save_as_vedio,
                            build_dir)

# Constants
MEDIUM_RANGE = (0.2, 0.6)  # fractions splitting the distance-sorted reachable points into easy/medium/hard bands
TARGET_RANGE = 0.8  # fraction of filtered objects sampled per house (1.0 would be all targets)
DEBUG = True  # NOTE(review): not referenced anywhere in this file — confirm before removing
MAX_FRAME = 10000  # maxlen of the per-trajectory navigation-frame deque
MAX_AGENT_OBJECT_DISTANCE = 1  # agent counts as "arrived" within this x/z distance to the object
MAX_RANDOM_AGENT_POSITION_ATTEMPTS = 200  # per-target retries with resampled start poses in process_house
MAX_ATTEMPTS = 200  # re-planning iterations inside one rollout (verify_get_actions_from_shortest_path)
NUM_PROCESSES = 48  # worker processes in the multiprocessing Pool
ABSOLUTE_MIN_PIXELS = 100  # visibility threshold for the target in the center of the frame
ALNATIVE_ACTIONS = [ 'm','b', 'l', 'r', 'ls','rs']  # fallback actions when the expert action fails (sic: "alternative")
SPLIT= "train"  # dataset split name used in output paths

class Expert(StretchController):
    """Expert policy wrapper around StretchController.

    Rolls out shortest-path navigation trajectories toward sampled target
    objects inside a single ProcTHOR house and records frames, actions and
    metadata to disk for imitation-learning data generation.
    """

    def __init__(self, id=0, root=f'./generator_data_debug/{SPLIT}', **kwargs):
        """Initialize the Expert controller.

        Args:
            id: House index; also names the per-house output sub-directory.
            root: Root directory under which all generated data is written.
            **kwargs: Forwarded verbatim to StretchController.
        """
        super().__init__(**kwargs)
        self.id = id
        self.successful_objects = []  # "objectType|objectId" strings completed so far
        self.root = root
        self.root_dir = build_dir(f"{self.root}/{self.id}")
        self.status = []  # per-trajectory status records (intended for h5py export; never filled here — TODO confirm)

        os.makedirs(f"{self.root}/{id}", exist_ok=True)
        # NOTE(review): `houses` is a module-level global assigned in __main__ and
        # inherited by fork-started worker processes — confirm on spawn platforms.
        save_data_json(houses[id], f"{self.root}/{id}/house.json")

    def build_object_dir(self, target=None):
        """(Re)create the frame/metadata output directories for one target object."""
        self.frame_dir = f"{self.root}/{self.id}/{target['objectType']}|{target['objectId']}/frame"
        self.metadata_dir = f"{self.root}/{self.id}/{target['objectType']}|{target['objectId']}/metadata"

        # `directory` instead of `dir`: avoid shadowing the builtin.
        for directory in [self.frame_dir, self.metadata_dir]:
            # Wipe stale data from previous attempts so each rollout starts clean.
            if os.path.exists(directory):
                shutil.rmtree(directory)
            os.makedirs(directory, exist_ok=True)

    def target_types(self, datapath="./expert/domain/procthor.yaml"):
        """Load (and cache per path) the target object types from a YAML file."""
        import yaml
        # Cache: the YAML is static config and this method used to be re-evaluated
        # for every object inside get_filtered_objects' comprehension.
        cache = getattr(self, "_target_types_cache", None)
        if cache is None:
            cache = self._target_types_cache = {}
        if datapath not in cache:
            with open(datapath, 'r') as file:
                data = yaml.safe_load(file)
            cache[datapath] = data.get('target_object_types', [])
        return cache[datapath]

    def get_objects_(self):
        """Return the current object metadata list from the environment."""
        with self.include_object_metadata_context():
            return list(self.controller.last_event.metadata["objects"])

    def split_reachable_area(self, object_id, mode='split'):
        """Split the reachable positions into difficulty bands by distance to the object.

        Args:
            object_id: Target object id used as the distance anchor.
            mode: "all" returns the full sorted list under every difficulty key;
                any other value splits it by the MEDIUM_RANGE fractions.

        Returns:
            Dict mapping "easy"/"medium"/"hard" to sorted (distance, position) pairs.
        """
        # Hoist the object position lookup out of the comprehension (it was
        # re-queried once per reachable point).
        obj_pos = self.get_obj_pos_from_obj_id(object_id)
        length_from_positions = [
            (l2_distance(obj_pos, pos), pos) for pos in self.get_reachable_positions()
        ]
        if len(length_from_positions) < 10:
            print(f"{self.id}length-from-positions", len(length_from_positions))
        length_from_positions.sort(key=lambda x: x[0])
        if mode == "all":
            return {
                "easy": length_from_positions,
                "medium": length_from_positions,
                "hard": length_from_positions,
            }
        lo = int(len(length_from_positions) * MEDIUM_RANGE[0])
        hi = int(len(length_from_positions) * MEDIUM_RANGE[1])
        return {
            "easy": length_from_positions[:lo],
            "medium": length_from_positions[lo:hi],
            "hard": length_from_positions[hi:],
        }

    def random_agent_position(self, objectid, difficulty=None):
        """Teleport the agent to a random reachable spot of the given difficulty.

        Args:
            objectid: Target object id that defines the difficulty bands.
            difficulty: "easy"/"medium"/"hard"; sampled per call when None.
                (Fix: the old `random.choice(...)` default was evaluated only
                once, at function-definition time, so every call without an
                explicit argument shared one fixed difficulty.)

        Returns:
            True if the Teleport action succeeded.
        """
        if difficulty is None:
            difficulty = random.choice(["easy", "medium", "hard"])
        old_agent_location = self.get_current_agent_full_pose()
        self.difficulty_spots = self.split_reachable_area(objectid)[difficulty]
        position = random.choice(self.difficulty_spots)[1]

        # Snap the start rotation to the discrete rotation grid.
        rot_y = random.choice(list(range(0, 360, int(AGENT_ROTATION_DEG))))

        event = self.controller.step(
            action="Teleport",
            position=position,
            rotation=dict(x=0, y=rot_y, z=0),
            standing=old_agent_location["isStanding"],
        )
        return event.metadata["lastActionSuccess"]

    def teleport_agent(self, agent_pose, teleport_horizon=False):
        """Teleport the agent to a given full pose; optionally restore the camera horizon."""
        action_params = {
            'action': 'Teleport',
            'position': agent_pose['position'],
            'rotation': agent_pose['rotation'],
            'standing': agent_pose['isStanding'],
        }
        if teleport_horizon:
            action_params['horizon'] = agent_pose["cameraHorizon"]
        return self.controller.step(**action_params).metadata["lastActionSuccess"]

    def full_objects(self):
        """Return a random TARGET_RANGE fraction of the filtered target objects."""
        objects = self.get_filtered_objects()
        return random.sample(objects, int(len(objects) * TARGET_RANGE))

    def get_filtered_objects(self):
        """Return scene objects whose type is in the configured target types."""
        # Hoist the type lookup out of the comprehension: target_types() used to
        # be re-evaluated (YAML re-read) once per object.
        types = set(self.target_types())
        return [o for o in self.get_objects_() if o["objectType"] in types]

    def get_actions_from_shortest_path(self, object_id):
        """Convert the shortest path to the object into a discrete action sequence."""
        shortest_path = self.get_shortest_path_to_object(object_id)
        return shortest_path_to_actions(
            shortest_path, self.get_current_agent_full_pose()
        )

    def get_agent_distance_to_object(self, object_id):
        """Return the agent-object distance in the horizontal (x, z) plane."""
        object_info = self.get_object(object_id)
        agent_info = self.get_current_agent_full_pose()
        return math.hypot(object_info["position"]["x"] - agent_info["position"]["x"],
                          object_info["position"]["z"] - agent_info["position"]["z"])

    def perform_action_with_retry(self, action, alternative_actions, max_retries=3):
        """Execute `action`, retrying on failure, then fall back to alternatives.

        Args:
            action: Preferred discrete action.
            alternative_actions: Fallbacks tried in order when `action` keeps failing.
            max_retries: Attempts per candidate action.

        Returns:
            (True, executed_action) on success, (False, None) if everything failed.
        """
        for _ in range(max_retries):
            if self.agent_step(action).metadata["lastActionSuccess"]:
                return True, action

        for alt_action in alternative_actions:
            if alt_action == 'b' and random.random() < 0.8:  # Reduce the probability of action 'b'
                continue
            for _ in range(max_retries):
                if self.agent_step(alt_action).metadata["lastActionSuccess"]:
                    return True, alt_action
        return False, None

    def is_agent_position_same_shortest_path_start(self, shortest_path, threshold=0.05):
        """Return True if the agent stands (within `threshold`) on the path's first point."""
        agent_position = self.get_current_agent_full_pose()["position"]
        return l2_distance(agent_position, shortest_path[0]) < threshold

    def get_position_rotions(self, agent_location):
        """Flatten a full agent pose into [px, py, pz, rx, ry, rz]."""
        pos = agent_location['position']
        rot = agent_location['rotation']
        return [pos['x'], pos['y'], pos['z'], rot['x'], rot['y'], rot['z']]

    def reset_task(self, target):
        """Reset per-target trajectory buffers and output directories."""
        self.target = target
        self.target_id = target['objectId']

        self.init_agent_pose = self.get_current_agent_full_pose()
        self.actions = [' ']
        self.positions = [self.get_position_rotions(self.init_agent_pose)]
        self.reference = ['init']  # per-step provenance: 'init' / 'expert' / 'random'
        self.nav_frames = deque([self.navigation_camera], maxlen=MAX_FRAME)  # Navigation frame buffer
        self.last_actions_success = [True]

        # Create directories
        self.build_object_dir(self.target)

    def update_state(self, action='', full_last_agent_location=None, reference='expert'):
        """Append one step (action, frame, pose, provenance) to the trajectory buffers.

        Fix: the `reference` default is now the string 'expert' instead of the
        mutable list ['expert'], matching the string values ('init'/'expert'/
        'random') appended everywhere else.
        """
        self.actions.append(action)
        self.nav_frames.append(self.navigation_camera)
        pose = full_last_agent_location if full_last_agent_location is not None else self.get_current_agent_full_pose()
        self.positions.append(self.get_position_rotions(pose))
        self.reference.append(reference)

    def save_collected_data(self):
        """Persist the finished trajectory: JSON metadata, frames, video, path plots."""
        self.successful_objects.append(f"{self.target['objectType']}|{self.target['objectId']}")

        # Save data to JSON files
        save_data_json(self.successful_objects, f"{self.root_dir}/successful_objects.json")
        save_data_json(self.init_agent_pose, f"{self.metadata_dir}/agent.json")
        save_data_json(self.target, f"{self.metadata_dir}/object.json")
        save_data_json(self.positions, f"{self.metadata_dir}/positions.json")
        save_data_json(self.actions, f"{self.metadata_dir}/actions.json")
        save_data_json(self.reference, f"{self.metadata_dir}/reference.json")

        # Save navigation frames to a directory and a video of the rollout
        save_navigation_frame(self.nav_frames, self.frame_dir)
        save_as_vedio(self.nav_frames, self.metadata_dir)

        # Generate a top-down view of the agent's path and save it as an image
        shortest_path = positions2path(self.positions)
        top_down_path_frame = self.get_top_down_path_view(shortest_path)
        save_img_from_frame(top_down_path_frame, save_path=f"{self.metadata_dir}/test_top_down_along_path.jpg")

        # Save the robot's path as a plot
        plot_grid_map(shortest_path, self.actions, self.init_agent_pose,
                      step_length=AGENT_MOVEMENT_CONSTANT,
                      save_path=f"{self.metadata_dir}/robot_path.jpg")

    def plot_path(self, previous_shortest_path, previous_actions, attempts):
        """Plot the grid map with the current path and actions (debug aid)."""
        plot_grid_map(previous_shortest_path, previous_actions, self.get_current_agent_full_pose(),
                      step_length=AGENT_MOVEMENT_CONSTANT,
                      save_path=f"{self.metadata_dir}/robot_path_{attempts}.jpg")

    def _finish_task(self, object_id):
        """Align to the object, record the terminal 'end' step, save data, return status."""
        self.execute_final_action(object_id)
        self.update_state(action='end')
        self.save_collected_data()
        return self.status

    def verify_get_actions_from_shortest_path(self, object_id):
        """Follow expert shortest-path actions until the object is reached.

        Re-plans after each deviation from the path; raises if the object is
        unreachable, if an action and all alternatives fail, or if MAX_ATTEMPTS
        is exhausted.
        """
        debugpy.breakpoint()  # debug hook; no-op unless a debugger is attached
        attempts = 0
        while attempts < MAX_ATTEMPTS:

            if self.get_agent_distance_to_object(object_id) <= MAX_AGENT_OBJECT_DISTANCE:
                return self._finish_task(object_id)

            previous_shortest_path = self.get_shortest_path_to_object(object_id, attempt_path_improvement=True)

            if previous_shortest_path is None:
                raise Exception("Failed to retrieve the shortest path. The object is not reachable.")

            # Planner paths may not start exactly at the agent; prepend its position.
            if not self.is_agent_position_same_shortest_path_start(previous_shortest_path):
                previous_shortest_path.insert(0, self.get_current_agent_full_pose()["position"])

            previous_actions = shortest_path_to_actions(previous_shortest_path, self.get_current_agent_full_pose())
            self.plot_path(previous_shortest_path, previous_actions, attempts)  # debug

            for action in previous_actions:
                if action == "end":
                    return self._finish_task(object_id)
                previous_action = copy.deepcopy(action)
                previous_agent_location = self.get_current_agent_full_pose()
                action_is_success, action = self.perform_action_with_retry(action, alternative_actions=ALNATIVE_ACTIONS)
                # A substituted fallback action is labeled 'random' rather than 'expert'.
                reference = "expert" if previous_action == action else "random"
                if not action_is_success:
                    raise Exception(f"Action {action} and all alternatives failed.")

                full_last_agent_location = self.get_current_agent_full_pose()

                if self.get_agent_distance_to_object(object_id) <= MAX_AGENT_OBJECT_DISTANCE:
                    self.update_state(action=action, full_last_agent_location=full_last_agent_location, reference=reference)
                    return self._finish_task(object_id)

                if not check_agent_position(previous_shortest_path, full_last_agent_location, threshold=0.2):
                    # Drifted off the planned path: rewind to the pre-action pose and re-plan.
                    self.teleport_agent(previous_agent_location)
                    break

                self.update_state(action=action, full_last_agent_location=full_last_agent_location, reference=reference)

            attempts += 1

        # Fix: the original *returned* this Exception instead of raising it, so
        # the caller's except block never fired and the failure looked like success.
        raise Exception("Max attempts reached. Object is still not visible.")

    def execute_final_action(self, object_id):
        """Rotate in place until the target object is sufficiently visible and centered."""
        if not is_any_object_sufficiently_visible_and_in_center_frame(
            self, [object_id], absolute_min_pixels=ABSOLUTE_MIN_PIXELS
        ):
            angle_diff = self.get_agent_alignment_to_object(object_id)
            actions_ = determine_turn_actions_(angle_diff)
            for action in actions_:
                agent_event = self.agent_step(action)
                if not agent_event.metadata["lastActionSuccess"]:
                    raise Exception(f"Action {action} failed.")
                self.update_state(action=action)
                if is_any_object_sufficiently_visible_and_in_center_frame(
                    self, [object_id], absolute_min_pixels=ABSOLUTE_MIN_PIXELS
                ):
                    break

def initialize_expert_controller(max_init_attempts=5, stretch_env_args=None, houses=None, house_id=0, root='./generator_data_debug'):
    """Attempt to initialize the Expert controller, retrying on timeouts.

    Args:
        max_init_attempts: How many TimeoutError retries before giving up.
        stretch_env_args: Environment kwargs forwarded to Expert; the dict is
            mutated to pin a GPU device (house_id % 3).
        houses: Sequence of house specs; houses[house_id] is loaded.
        house_id: Index of the house to load (also the output directory id).
        root: Root output directory passed through to Expert.

    Returns:
        An initialized Expert controller, or None if every attempt timed out.
    """
    init_attempts = 0
    stretch_env_args = stretch_env_args or {}
    stretch_env_args['gpu_device'] = house_id % 3  # Assign a GPU device based on house_id

    while init_attempts < max_init_attempts:
        try:
            expert_controller = Expert(id=house_id, root=root, **stretch_env_args)
            expert_controller.reset(houses[house_id])
            return expert_controller  # Successfully initialized, return controller
        except TimeoutError:
            init_attempts += 1
            # Fix: surface the retry instead of failing silently.
            print(f"House {house_id}: init timed out (attempt {init_attempts}/{max_init_attempts}); retrying...")
            time.sleep(2)  # Wait before retrying

    # Fix: make the give-up path observable; callers must handle the None return.
    print(f"House {house_id}: failed to initialize after {max_init_attempts} attempts.")
    return None

def process_house(args):
    """Worker entry point: generate expert trajectories for every sampled target in one house.

    Args:
        args: (houses, house_id) tuple — Pool.imap_unordered passes a single argument.
    """
    houses, house_id = args  # Unpack the arguments
    expert_controller: Expert = initialize_expert_controller(5, STRETCH_ENV_ARGS, houses, house_id)
    if expert_controller is None:
        # Fix: initialize_expert_controller returns None after repeated timeouts;
        # previously this crashed below with an AttributeError on None.
        print(f"House {house_id} skipped: controller failed to initialize.")
        return

    for target in expert_controller.full_objects():
        print(target['objectId'])
        attempts = 0
        succeeded = False
        while not succeeded and attempts < MAX_RANDOM_AGENT_POSITION_ATTEMPTS:
            try:
                if attempts:  # previous placement failed; resample a start pose
                    expert_controller.random_agent_position(target['objectId'], "hard")

                expert_controller.reset_task(target)
                if expert_controller.get_shortest_path_to_object(target['objectId']) is None:
                    raise Exception("Failed to retrieve the shortest path.")

                expert_controller.verify_get_actions_from_shortest_path(target['objectId'])

                print(f"House {house_id} object {target['objectType'], target['objectId']} completed.")
                succeeded = True
                gc.collect()
                time.sleep(0.5)

            except Exception as e:
                # Best-effort retry loop: log and resample a start pose next iteration.
                print(f"House {house_id} object {target['objectType'], target['objectId']} failed: {e}")
                attempts += 1
                gc.collect()

    expert_controller.stop()
    gc.collect()
    
if __name__ == "__main__":
    # enable_remote_debug(65532)
    houses = load_procthor_houses(mode = "train")
    # Start from a clean output tree for this split.
    shutil.rmtree(f"./generator_data_debug/{SPLIT}", ignore_errors=True)
    tasks = [(houses, house_id) for house_id in range(len(houses))]
    with Pool(processes=NUM_PROCESSES) as pool:
        # imap_unordered + tqdm: progress bar over houses as workers finish.
        for _ in tqdm(pool.imap_unordered(process_house, tasks), total=len(houses)):
            pass
    
