import argparse
from queue import Queue
from threading import Thread

import torch
from PIL import Image
from transformers import AutoTokenizer, TextIteratorStreamer
from transformers import AutoModelForCausalLM
from moondream.hf import LATEST_REVISION, Moondream, detect_device

print("Script is running...")

if __name__ == "__main__":
    # CLI: --image is mandatory; --prompt asks one question; --caption emits a
    # caption; with neither, the script drops into an interactive chat loop.
    print("Parsing arguments...")
    parser = argparse.ArgumentParser()
    parser.add_argument("--image", type=str, required=True)
    parser.add_argument("--prompt", type=str, required=False)
    parser.add_argument("--caption", action="store_true")
    parser.add_argument("--cpu", action="store_true")
    args = parser.parse_args()

    print("Determining device...")
    if args.cpu:
        device = torch.device("cpu")
        dtype = torch.float32
    else:
        device, dtype = detect_device()
        if device != torch.device("cpu"):
            print("Using device:", device)
            print("If you run into issues, pass the `--cpu` flag to this script.")
            print()

    print("Loading model...")
    image_path = args.image
    prompt = args.prompt

    model_id = "vikhyatk/moondream2"
    revision = "2024-08-26"
    # FIX: `torch_dtype` is a model-loading kwarg and was wrongly passed to the
    # tokenizer; apply it to the model, and actually move the model to the
    # device selected above so --cpu / detect_device() take effect.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, trust_remote_code=True, revision=revision, torch_dtype=dtype
    ).to(device)
    model.eval()  # inference only — disable dropout etc.
    tokenizer = AutoTokenizer.from_pretrained(
        model_id, trust_remote_code=True, revision=revision
    )

    print("Model loaded successfully.")

    # Encode the image ONCE and reuse the embedding below — encode_image is a
    # full vision-encoder forward pass; the original ran it twice.
    print("Encoding image...")
    image_embeds = model.encode_image(Image.open(image_path))
    print("Image encoded successfully.")

    print("Generating description of the image...")
    print(model.answer_question(image_embeds, "Describe this image.", tokenizer))

    if args.caption:
        print("Generating caption...")
        # caption() takes raw PIL images (it encodes internally), so pass the
        # opened image rather than the precomputed embedding.
        print(model.caption(images=[Image.open(image_path)], tokenizer=tokenizer)[0])
    else:
        if prompt is None:
            # Interactive mode: stream each answer token-by-token while the
            # generation runs on a background thread.
            chat_history = ""
            print("Entering chat loop...")
            while True:
                question = input("> ")
                print("Processing question:", question)

                # answer_question pushes its final answer onto this queue so we
                # can recover it after streaming finishes.
                result_queue = Queue()

                streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)

                thread = Thread(
                    target=model.answer_question,
                    args=(image_embeds, question, tokenizer, chat_history),
                    kwargs={"streamer": streamer, "result_queue": result_queue},
                )
                thread.start()

                # Buffer partial chunks so the model's end-of-text markers
                # ("<END" style tokens) are never echoed mid-stream.
                buffer = ""
                for new_text in streamer:
                    buffer += new_text
                    if not new_text.endswith("<") and not new_text.endswith("END"):
                        print(buffer, end="", flush=True)
                        buffer = ""
                print(buffer)

                thread.join()

                answer = result_queue.get()
                chat_history += f"Question: {question}\n\nAnswer: {answer}\n\n"
        else:
            # One-shot question answering with the provided prompt.
            print("Processing prompt:", prompt)
            answer = model.answer_question(image_embeds, prompt, tokenizer)
            print("Answer:", answer)
