import os
import threading
from typing import AsyncGenerator
from namo.api.base import VLBase
from loguru import logger
import torch
from termcolor import colored
from transformers import TextStreamer
from transformers import AutoProcessor
from namo.models.namo import NamoForCausalLM
from namo.models.configuration_namo import NamoConfig
from namo.utils.infer_utils import CallbackStreamer, load_multi_images_maybe
from namo.utils.process_utils import convert_image_tags, tokenizer_image_token
from loguru import logger
from huggingface_hub import hf_hub_download, snapshot_download
from namo.utils.process_utils import smart_resize_v1
from transformers.models.clip.image_processing_clip import CLIPImageProcessor
from namo.utils.infer_utils import url_to_image
from transformers import TextIteratorStreamer
from namo.utils.hf_utils import find_and_merge_lora_adapters

try:
    from qwen_vl_utils import process_vision_info
except ImportError as e:
    pass
from .utils import replace_qwenvl_image_token_to_llava


class NamoHydraVL(VLBase):
    """Chat-style vision-language wrapper around a Namo Hydra checkpoint.

    Loads a ``NamoForCausalLM`` (optionally merging LoRA adapters saved next to
    the checkpoint), and exposes single-turn ``generate`` plus OpenAI-style
    ``chat_with_request`` APIs with optional token streaming.
    """

    def __init__(
        self,
        model_path=None,
        processor_path=None,
        device="auto",
        dtype=torch.bfloat16,
        system_msg="You are Namo small VLM model, trained by NAMO. You can look images and with great OCR ability.",
    ):
        super().__init__(model_path, processor_path, device, dtype)
        # Conversation state: history always begins with the system message.
        self.default_sys = {"role": "system", "content": system_msg}
        self.history_msgs = [self.default_sys]
        self.dtype = dtype

    def load_model_simple(self, model_path):
        """Load a ``NamoForCausalLM`` from *model_path*.

        If LoRA artifacts are found (``non_lora_trainables.bin`` in
        *model_path*, or in the parent directory of a ``checkpoint-*`` dir
        alongside ``adapter_model.safetensors``), the base model is rebuilt
        from config, the non-LoRA weights are loaded, and adapters are merged.
        Otherwise a plain ``from_pretrained`` load is done.
        """
        model_dir = model_path.rstrip("/")
        non_lora_bin = os.path.join(model_path, "non_lora_trainables.bin")

        if "checkpoint-" in os.path.basename(model_dir):
            # Checkpoint sub-directory: LoRA artifacts live in the parent.
            parent_path = os.path.dirname(model_dir)
            non_lora_bin = os.path.join(parent_path, "non_lora_trainables.bin")
            adapter_model = os.path.join(parent_path, "adapter_model.safetensors")

            if os.path.exists(adapter_model):
                logger.info(f"Loading adapter model: {adapter_model}")
                assert os.path.exists(
                    non_lora_bin
                ), f"non_lora_trainables.bin not found in {parent_path}"

        if os.path.exists(non_lora_bin):
            logger.info(f"loading lora: {model_path}")
            if "checkpoint-" in os.path.basename(model_dir):
                # BUG FIX: the original nested double quotes inside a
                # double-quoted f-string, which is a SyntaxError before
                # Python 3.12 (PEP 701). Compute the path first instead.
                config_dir = os.path.dirname(model_dir)
                logger.warning(
                    f"Loading from a checkpoint directory, using parent path for config. {config_dir}"
                )
                config = NamoConfig.from_pretrained(config_dir)
            else:
                config = NamoConfig.from_pretrained(model_path)
            model = NamoForCausalLM(config=config)

            # Load on CPU first; the caller moves the model to the target
            # device/dtype afterwards (see load_model).
            non_lora = torch.load(non_lora_bin, map_location="cpu")
            # PEFT prefixes trainable weights; strip them so keys match the
            # plain model's state dict.
            non_lora = {
                k.replace("base_model.model.", "").replace(".base_layer", ""): v
                for k, v in non_lora.items()
            }
            model.load_state_dict(non_lora, strict=False)
            model = find_and_merge_lora_adapters(model, model_path)
            return model
        return NamoForCausalLM.from_pretrained(model_path)

    def load_model(self, model_path):
        """Resolve, (maybe) download, load and place the model.

        Falls back to the default checkpoint path and pulls the weights from
        the HuggingFace Hub when they are not present locally.
        """
        if model_path is None:
            model_path = "checkpoints/Namo-500M-Hydra-V1"
        if not os.path.exists(model_path):
            logger.info(
                f"downloading model from lucasjin/Namo-500M-V1 huggingface into: {model_path}"
            )
            snapshot_download(
                repo_id="lucasjin/Namo-500M-V1",
                local_dir=model_path,
                local_dir_use_symlinks=False,
            )
        model = self.load_model_simple(model_path)
        model.eval().to(self.device).to(self.dtype)
        logger.info(f"model loaded from: {model_path}")
        return model

    def load_processor(self, processor_path):
        """Expose the vision tower's image processor and the LM tokenizer."""
        processor = self.model.get_vision_tower().image_processor
        self.image_processor = processor
        self.tokenizer = self.model.get_namo().tokenizer
        if self.tokenizer.pad_token_id is None:
            # BUG FIX: tokenizer.encode() returns a *list* of ids, which is
            # not a valid pad_token_id, and crashes when pad_token is None.
            if self.tokenizer.pad_token is not None:
                self.tokenizer.pad_token_id = self.tokenizer.convert_tokens_to_ids(
                    self.tokenizer.pad_token
                )
            else:
                # Common fallback when no pad token is defined.
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
        return processor

    def build_chat_prompt(self, messages, tokenizer):
        """Render OpenAI-style *messages* into the model's chat template.

        User content is normalized so that each attached image becomes an
        ``<image>`` placeholder, unless the text already carries matching
        ``<image>`` tags. Returns the templated string with the assistant
        turn opened, ready for generation.
        """
        converted = []
        for msg in messages:
            if msg["role"] in ("system", "assistant"):
                # BUG FIX: assistant turns previously appended the bare
                # content string; apply_chat_template expects role/content
                # dicts, so keep the full message for both roles.
                converted.append(msg)
                continue

            parts = []
            imgs_num = 0
            txt = ""
            if isinstance(msg["content"], str):
                txt = msg["content"]
            else:
                for content in msg["content"]:
                    if content["type"] == "image_url":
                        parts.append("<image>")
                        imgs_num += 1
                    elif content["type"] == "text":
                        parts.append(content["text"] + "\n")
                        # NOTE(review): only the *last* text part is kept in
                        # `txt`; with multiple text parts the tag-count check
                        # below may mis-fire — confirm intended.
                        txt = content["text"]
            if txt.count("<image>") == imgs_num:
                # Text already carries one tag per image: use it verbatim.
                parts = txt
            else:
                parts = convert_image_tags("".join(parts))
            converted.append({"role": msg["role"], "content": parts})
        return (
            tokenizer.apply_chat_template(converted, tokenize=False)
            + "<|im_start|>assistant\n"
        )

    def get_history_images(self):
        """Collect every image_url payload stored in the history."""
        his_images = []
        for msg in self.history_msgs:
            if isinstance(msg["content"], str):
                continue
            for content in msg["content"]:
                if content["type"] == "image_url":
                    his_images.append(content["image_url"])
        return his_images

    @staticmethod
    def msg_has_img(msg):
        """Return True when *msg* carries at least one non-null image_url."""
        if isinstance(msg["content"], list):
            return any(
                c["type"] == "image_url" and c["image_url"] is not None
                for c in msg["content"]
            )
        return False

    def remove_history_images(self):
        """Strip all image parts from the stored history (text is kept)."""
        cleaned = []
        for msg in self.history_msgs:
            if self.msg_has_img(msg):
                msg_new = msg.copy()
                msg_new["content"] = [
                    itm for itm in msg["content"] if itm["type"] != "image_url"
                ]
                cleaned.append(msg_new)
            else:
                cleaned.append(msg)
        self.history_msgs = cleaned

    def get_images_history_or_none(self):
        """Return the history's image_url payloads, or None if there are none."""
        his_images = []
        for msg in self.history_msgs:
            if isinstance(msg["content"], list):
                for itm in msg["content"]:
                    if itm["type"] == "image_url":
                        his_images.append(itm["image_url"])
        return his_images if len(his_images) > 0 else None

    def get_msg(self, text, images=None, max_size=640):
        """Build a user message interleaving *text* and *images*.

        ``<image>`` markers in *text* mark insertion points; images beyond
        the number of markers are appended at the end.
        """
        # Normalize images into a list.
        images = (
            []
            if images is None
            else ([images] if not isinstance(images, list) else images)
        )

        segments = text.split("<image>")
        content = []

        # Interleave text segments with images at the marker positions.
        for i, seg in enumerate(segments):
            if seg:
                content.append({"type": "text", "text": seg})
            if i < len(images):
                content.append(
                    {
                        "type": "image",
                        "image": images[i],
                        "max_size": max_size,
                    }
                )

        # Any images beyond the embedded markers go at the end.
        for img in images[len(segments):]:
            content.append(
                {
                    "type": "image",
                    "image": img,
                    "max_size": max_size,
                }
            )

        return {
            "role": "user",
            "content": content,
        }

    def generate(
        self,
        prompt,
        images,
        stream=True,
        enable_thinking=False,
        max_size=700,
        max_new_tokens=500,
        verbose=False,
        prevent_more_image=True,
    ):
        """Single-turn generation for *prompt* plus optional *images*.

        Returns the decoded output text (first element when the decoder
        returns a batch). When *stream* is True, tokens are also echoed to
        stdout via a TextStreamer.
        """
        # The qwen_vl_utils import at module top is allowed to fail silently;
        # fail loudly here where it is actually needed.
        if "process_vision_info" not in globals():
            raise ImportError(
                "qwen_vl_utils is required by NamoHydraVL.generate; "
                "install it with `pip install qwen-vl-utils`."
            )

        msg = self.get_msg(prompt, images, max_size)
        messages = [msg]

        # Preparation for inference.
        text = self.processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        if not enable_thinking:
            # Pre-fill an empty think block to skip the thinking phase.
            text += "<think>\n\n</think>\n\n"

        text = replace_qwenvl_image_token_to_llava(text)
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = self.processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        # Token ids are rebuilt with the llava-style image placeholder (-200).
        input_ids = (
            tokenizer_image_token(text, self.tokenizer, return_tensors="pt")
            .unsqueeze(0)
            .to(self.device)
        )
        if verbose:
            print(
                f"input_ids: {input_ids} {self.tokenizer.decode([i for i in input_ids[0] if i != -200])}"
            )

        inputs_new = dict()
        inputs_new["input_ids"] = input_ids.to(self.device)
        if "image_grid_thw" in inputs:
            inputs_new["image_sizes"] = inputs["image_grid_thw"].to(self.device)
            inputs_new["pixel_values"] = (
                inputs["pixel_values"].to(self.device).to(self.dtype)
            )
        inputs_new["attention_mask"] = torch.ones_like(input_ids).to(self.device)
        if verbose:
            print(inputs_new)

        # Inference: generation of the output, optionally streamed to stdout.
        if stream:
            streamer = TextStreamer(
                self.tokenizer, skip_prompt=True, skip_special_tokens=True
            )
        else:
            streamer = None

        generated_ids = self.model.generate(
            **inputs_new,
            do_sample=False,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
        )
        output_text = self.processor.batch_decode(
            generated_ids,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )
        if verbose:
            print(f"output_text: {output_text}")
        if isinstance(output_text, list):
            return output_text[0]
        return output_text

    def generate_response(
        self, model, tokenizer, pixels, prompt, stream=True, return_generator=False
    ):
        """Run generation over a pre-templated *prompt*.

        - ``return_generator=True``: generation runs on a background thread
          and a generator of decoded text chunks is returned.
        - otherwise: blocks and returns the full decoded string; when
          *stream* is True tokens are also echoed to stdout.
        """
        input_ids = (
            tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
            .unsqueeze(0)
            .to(model.device)
        )
        # Always bind streamer so the gen_args expression below is safe.
        streamer = None
        if return_generator:
            streamer = TextIteratorStreamer(self.tokenizer, skip_special_tokens=True)
        elif stream:
            streamer = TextStreamer(
                tokenizer, skip_prompt=True, skip_special_tokens=True
            )

        gen_args = {
            "pixel_values": pixels,
            "input_ids": input_ids,
            "max_new_tokens": 460,
            "do_sample": False,
            "eos_token_id": self.tokenizer.eos_token_id,
            "pad_token_id": self.tokenizer.pad_token_id,
            "streamer": streamer,
        }
        if return_generator:
            thread = threading.Thread(
                target=self.model.generate,
                kwargs=gen_args,
            )
            thread.start()
            return (new_text for new_text in streamer)
        else:
            # NOTE(review): autocast uses float16 while the model default
            # dtype is bfloat16 — confirm this mix is intended.
            with torch.autocast(device_type="cuda", dtype=torch.float16):
                output_ids = model.generate(**gen_args)
            return tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()

    def chat_with_request(
        self, messages, stream=True, prevent_more_image=True, verbose=False
    ):
        """Answer a pre-built OpenAI-style *messages* list.

        With *prevent_more_image* only the most recent message's images are
        kept; older image parts are dropped from the prompt. Returns a text
        generator when *stream* is True, otherwise the full response string.
        """
        messages_new = []
        images = []
        last_img_idx = 0
        # Walk newest-first so only the latest image-bearing turn keeps
        # its images when prevent_more_image is set.
        for msg in messages[::-1]:
            if self.msg_has_img(msg):
                if last_img_idx >= 1 and prevent_more_image:
                    msg_new = msg.copy()
                    msg_new["content"] = [
                        itm for itm in msg["content"] if itm["type"] != "image_url"
                    ]
                    messages_new.append(msg_new)
                else:
                    for itm in msg["content"]:
                        if itm["type"] == "image_url":
                            images.append(url_to_image(itm["image_url"]["url"]))
                    messages_new.append(msg)
                last_img_idx += 1
            else:
                messages_new.append(msg)

        if prevent_more_image:
            assert (
                len(images) <= 1
            ), "if prevent more image, images at each iter should be 1."
        messages_new = messages_new[::-1]

        if len(images) > 0:
            pixel_values = [
                self.image_processor.preprocess(
                    img,
                    return_tensors="pt",
                )["pixel_values"]
                .to(self.model.device)
                .to(self.model.dtype)
                for img in images
            ]
        else:
            pixel_values = None

        input_templated = self.build_chat_prompt(messages_new, self.tokenizer)

        if pixel_values is not None:
            # Keep diagnostics out of stdout in library code.
            logger.debug(f"templated prompt: {input_templated}")
            logger.debug(f"images: {images}")

        if stream:
            return self.generate_response(
                self.model,
                self.tokenizer,
                pixel_values,
                input_templated,
                return_generator=True,
            )
        return self.generate_response(
            self.model, self.tokenizer, pixel_values, input_templated, stream=False
        )

    def stream_chat_with_request(self, messages):
        """Streaming convenience wrapper over chat_with_request."""
        yield from self.chat_with_request(messages, stream=True)