import os
import re
import threading
import time
from collections import deque

import numpy as np
import rclpy
import torch
from PIL import Image
from rclpy.executors import ExternalShutdownException
from rclpy.executors import MultiThreadedExecutor

from llava.constants import IMAGE_TOKEN_INDEX
from llava.conversation import SeparatorStyle, conv_templates
from llava.mm_utils import KeywordsStoppingCriteria, process_images, tokenizer_image_token
from llava.model.builder import load_pretrained_model
from vlnce_baselines.navila_trainer import sample_and_pad_images
from ros2_utils import ParamManager, RobotEnv


def main(args=None):
    """Run the NaVILA VLA navigation loop at ~2 Hz.

    Creates a ROS2 node, grabs the latest RGB frame from the robot each
    iteration, runs the vision-language model to get a textual action, and
    executes it on the robot until shutdown or an unrecoverable error.

    Args:
        args: Optional command-line args forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    # Pre-bind so the finally-block cleanup is safe even if startup fails
    # partway through (otherwise `del vla` etc. raise NameError and mask
    # the original exception).
    node = None
    env = None
    vla = None
    try:
        node = rclpy.create_node('navila_inference')
        param_manager = ParamManager()
        param_manager.set_params({"color_topics": ["/zed/zed_node/rgb/image_rect_color",]})
        env = RobotEnv(node, param_manager.get_params())
        instruction = "Your navigation task instruction here"
        vla = VLA(checkpoint_path="/path/to/your/checkpoint")
        period = 0.5  # target loop period: 2 Hz
        while rclpy.ok():
            loop_start = time.monotonic()
            try:
                rclpy.spin_once(node, timeout_sec=0.1)
                curr_rgb = env.latest_rgb(1)[0]
                output = vla.inference(curr_rgb, instruction)
                vla.take_action(env, output)
            except KeyboardInterrupt:
                break
            except Exception as e:
                print(f"Exception in VLA loop: {e}")
                break
            # NOTE: node.create_rate(2).sleep() would deadlock here — a Rate's
            # timer callback is only serviced while an executor spins, and this
            # single thread would be blocked inside sleep(). Sleep the remaining
            # time budget on a monotonic clock instead.
            remaining = period - (time.monotonic() - loop_start)
            if remaining > 0:
                time.sleep(remaining)
    except KeyboardInterrupt:
        pass
    except ExternalShutdownException:
        pass
    finally:
        if vla is not None:
            del vla
        if env is not None:
            del env
        if node is not None:
            node.destroy_node()
        rclpy.shutdown()


class VLA:
    def __init__(self, checkpoint_path: str):
        self.checkpoint_path = checkpoint_path
        self.model_name = os.path.basename(os.path.normpath(checkpoint_path))
        self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(checkpoint_path, self.model_name)
        self.model = self.model.cuda()
        self.num_video_frames = self.model.config.num_video_frames
        self.past_rgbs = deque(maxlen=1000)  # 保持最近的 num_video_frames - 1 帧

    @staticmethod
    def take_action(env: RobotEnv, command: str) -> None:
        # Define the regex patterns for each action
        patterns = {
            0: re.compile(r"\bstop\b", re.IGNORECASE),
            1: re.compile(r"\bis move forward\b", re.IGNORECASE),
            2: re.compile(r"\bis turn left\b", re.IGNORECASE),
            3: re.compile(r"\bis turn right\b", re.IGNORECASE),
        }

        # Function to map a string to an action integer
        def map_string_to_action(s):
            for action, pattern in patterns.items():
                if pattern.search(s):
                    return action
            return None  # Return None if no match is found

        try:
            actions = [map_string_to_action(command)]
        except:
            actions = [1]
        print(actions)

        # 动作执行
        if actions[0] == 1:
            try:
                match = re.search(r"move forward (\d+) cm", command)
                distance = int(match.group(1))
            except:
                distance = 25
            # 前进 distance
            env.step(1, distance)

        elif actions[0] == 2:
            try:
                match = re.search(r"turn left (\d+) degree", command)
                degree = int(match.group(1))
            except:
                degree = 15
            # 左转 degree
            env.step(2, degree)

        elif actions[0] == 3:
            try:
                match = re.search(r"turn right (\d+) degree", command)
                degree = int(match.group(1))
            except:
                degree = 15
            # 右转degree
            env.step(3, degree)

        else:  # 0, stop
            # stop
            env.step(0)


    def inference(self, curr_rgb: np.ndarray, instruction: str) -> str:
        with torch.no_grad():
            self.past_rgbs.append(curr_rgb)

            past_and_current_rgbs = sample_and_pad_images(list(self.past_rgbs), num_frames=self.num_video_frames)

            interleaved_images = "<image>\n" * (len(past_and_current_rgbs) - 1)

            frame_length = len(past_and_current_rgbs)
            print(f"input frame length {frame_length}")

            question = (
                f"Imagine you are a robot programmed for navigation tasks. You have been given a video "
                f'of historical observations {interleaved_images}, and current observation <image>\n. Your assigned task is: "{instruction}" '
                f"Analyze this series of images to decide your next action, which could be turning left or right by a specific "
                f"degree, moving forward a certain distance, or stop if the task is completed."
            )

            conv_mode = "llama_3"
            conv = conv_templates[conv_mode].copy()
            conv.append_message(conv.roles[0], question)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()

            images_tensor = process_images(past_and_current_rgbs, self.image_processor, self.model.config).to(
                self.model.device, dtype=torch.float16
            )
            input_ids = (
                tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
                .unsqueeze(0)
                .cuda()
            )

            stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
            keywords = [stop_str]
            stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)

            with torch.inference_mode():
                output_ids = self.model.generate(
                    input_ids,
                    images=images_tensor.half().cuda(),
                    do_sample=False,
                    temperature=0.0,
                    max_new_tokens=32,
                    use_cache=True,
                    stopping_criteria=[stopping_criteria],
                    pad_token_id=self.tokenizer.eos_token_id,
                )

            outputs = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
            outputs = outputs.strip()

            if outputs.endswith(stop_str):
                outputs = outputs[: -len(stop_str)]
            outputs = outputs.strip()
            print(outputs)
            self.past_rgbs.append(curr_rgb)

            return outputs


if __name__ == "__main__":
    main()