import os
from llava.model.builder import load_pretrained_model
import torch
from PIL import Image
import numpy as np
import re


def _parse_snapped_quantity(text, pattern, default, step, choices):
    """Extract an integer quantity from *text* with *pattern*, snapped to a grid.

    Returns *default* when the pattern does not match; values that are not a
    multiple of *step* are snapped to the nearest value in *choices*.
    """
    match = re.search(pattern, text)
    value = default if match is None else int(match.group(1))
    if value % step != 0:
        value = min(choices, key=lambda c: abs(c - value))
    return value


def main(checkpoint_path: str):
    """Run one LLaVA navigation inference step and dispatch the resulting action.

    Loads the model from *checkpoint_path*, builds a multi-image prompt from the
    past and current RGB observations, decodes the model's textual reply into a
    discrete action (0=stop, 1=forward, 2=left, 3=right), and steps the
    environment, queueing repeated unit actions for larger distances/angles.

    NOTE(review): this function references names not defined in this file
    (``batch``, ``past_rgbs``, ``current_episodes``, ``envs``,
    ``queue_actions``, ``sample_and_pad_images``, ``conv_templates``,
    ``process_images``, ``tokenizer_image_token``, ``IMAGE_TOKEN_INDEX``,
    ``SeparatorStyle``, ``KeywordsStoppingCriteria``) — presumably globals or
    missing imports; verify against the surrounding project.
    """
    model_name = os.path.basename(os.path.normpath(checkpoint_path))
    tokenizer, model, image_processor, context_len = load_pretrained_model(checkpoint_path, model_name)
    model = model.cuda()
    with torch.no_grad():
        # Current RGB observation; assumes batch[0]["rgb"] is an HxWx3 image
        # tensor — TODO confirm against the caller.
        curr_rgb = Image.fromarray(np.uint8(batch[0]["rgb"].cpu().numpy())).convert("RGB")

        past_and_current_rgbs = past_rgbs[0] + [curr_rgb]
        num_video_frames = model.config.num_video_frames

        # Subsample/pad the history to the fixed frame budget the model expects.
        past_and_current_rgbs = sample_and_pad_images(past_and_current_rgbs, num_frames=num_video_frames)

        instruction = current_episodes[0].instruction.instruction_text

        # One <image> placeholder per history frame; the current frame gets its
        # own placeholder inside the question text below.
        interleaved_images = "<image>\n" * (len(past_and_current_rgbs) - 1)

        frame_length = len(past_and_current_rgbs)
        print(f"input frame length {frame_length}")

        question = (
            f"Imagine you are a robot programmed for navigation tasks. You have been given a video "
            f'of historical observations {interleaved_images}, and current observation <image>\n. Your assigned task is: "{instruction}" '
            f"Analyze this series of images to decide your next action, which could be turning left or right by a specific "
            f"degree, moving forward a certain distance, or stop if the task is completed."
        )

        conv_mode = "llama_3"
        conv = conv_templates[conv_mode].copy()
        conv.append_message(conv.roles[0], question)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        images_tensor = process_images(past_and_current_rgbs, image_processor, model.config).to(
            model.device, dtype=torch.float16
        )
        input_ids = (
            tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
            .unsqueeze(0)
            .cuda()
        )

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            # Greedy decoding (do_sample=False); temperature is ignored in
            # that mode but kept as-is to avoid changing generate kwargs.
            output_ids = model.generate(
                input_ids,
                images=images_tensor.half().cuda(),
                do_sample=False,
                temperature=0.0,
                max_new_tokens=32,
                use_cache=True,
                stopping_criteria=[stopping_criteria],
                pad_token_id=tokenizer.eos_token_id,
            )

        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        outputs = outputs.strip()

        # Strip the stop token if generation ended on it.
        if outputs.endswith(stop_str):
            outputs = outputs[: -len(stop_str)]
        outputs = outputs.strip()
        print(outputs)

        # Regex patterns mapping the model's phrasing to discrete action ids.
        patterns = {
            0: re.compile(r"\bstop\b", re.IGNORECASE),
            1: re.compile(r"\bis move forward\b", re.IGNORECASE),
            2: re.compile(r"\bis turn left\b", re.IGNORECASE),
            3: re.compile(r"\bis turn right\b", re.IGNORECASE),
        }

        def map_string_to_action(s):
            """Return the first matching action id for *s*, or None if no match."""
            for action, pattern in patterns.items():
                if pattern.search(s):
                    return action
            return None

        # Default to "move forward" (1) when no action phrase matches.
        # BUGFIX: the original wrapped this in a bare try/except, but
        # map_string_to_action returns None rather than raising, so the
        # fallback never fired and a None action could reach envs.step().
        action = map_string_to_action(outputs)
        actions = [1 if action is None else action]
        print(actions)

    if actions[0] == 1:
        # Forward distance in cm, snapped to the 25 cm unit step.
        distance = _parse_snapped_quantity(outputs, r"move forward (\d+) cm", 25, 25, [25, 50, 75])
        outputs = envs.step([1])
        # Queue the remaining unit steps beyond the one just executed.
        for _ in range(distance // 25 - 1):
            queue_actions.append(1)

    elif actions[0] == 2:
        # Left-turn angle in degrees, snapped to the 15-degree unit step.
        degree = _parse_snapped_quantity(outputs, r"turn left (\d+) degree", 15, 15, [15, 30, 45])
        outputs = envs.step([2])
        for _ in range(degree // 15 - 1):
            queue_actions.append(2)
        print(f"queue length: {len(queue_actions)}")

    elif actions[0] == 3:
        # Right-turn angle in degrees, snapped to the 15-degree unit step.
        degree = _parse_snapped_quantity(outputs, r"turn right (\d+) degree", 15, 15, [15, 30, 45])
        outputs = envs.step([3])
        for _ in range(degree // 15 - 1):
            queue_actions.append(3)

    else:  # 0 -> stop
        outputs = envs.step(actions)

if __name__ == "__main__":
    # BUGFIX: main(checkpoint_path) requires an argument; the original called
    # main() bare, which raised TypeError on every invocation.
    import argparse

    parser = argparse.ArgumentParser(description="Run LLaVA navigation inference from a checkpoint.")
    parser.add_argument("checkpoint_path", help="Path to the pretrained model checkpoint directory.")
    args = parser.parse_args()
    main(args.checkpoint_path)