import argparse
from queue import Queue
from threading import Thread

import torch
import torch_npu
from PIL import Image
from transformers import AutoTokenizer, TextIteratorStreamer
from transformers import AutoModelForCausalLM, AutoTokenizer
from moondream.hf import LATEST_REVISION, Moondream, detect_device
import os
# Ascend toolkit environment setup. Default install path; change it if needed.
# NOTE(review): mutating LD_LIBRARY_PATH here only affects child processes —
# the dynamic loader of the already-running interpreter will not re-read it.
ASCEND_HOME = '/usr/local/Ascend'
os.environ['Ascend_HOME'] = ASCEND_HOME
_toolkit = ASCEND_HOME + '/ascend-toolkit/latest/fwkacllib'
# Prepend each toolkit directory with a ':' separator (the original code
# concatenated the old value directly after the new path, producing one
# invalid fused entry) and tolerate variables that are not yet set
# (os.environ[...] would raise KeyError for unset LD_LIBRARY_PATH/PYTHONPATH).
for _var, _entry in (
    ('PATH', _toolkit + '/npuctl/'),
    ('LD_LIBRARY_PATH', _toolkit + '/lib64'),
    ('PYTHONPATH', _toolkit + '/python/npuctl/'),
):
    _old = os.environ.get(_var, '')
    os.environ[_var] = _entry + (':' + _old if _old else '')

if __name__ == "__main__":
    # CLI: --image is required; exactly one of --caption / --prompt /
    # interactive chat (neither flag) selects the mode below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--image", type=str, required=True)
    parser.add_argument("--prompt", type=str, required=False)
    parser.add_argument("--caption", action="store_true")
    parser.add_argument("--cpu", action="store_true")
    args = parser.parse_args()

    # Device/precision selection: CPU runs in float32; the Ascend NPU
    # supports half precision, which improves performance.
    if args.cpu:
        device = torch.device("cpu")
        dtype = torch.float32
    else:
        device = torch.device("npu")
        dtype = torch.float16

    model_id = "vikhyatk/moondream2"
    revision = "2024-08-26"
    model = AutoModelForCausalLM.from_pretrained(
        model_id, trust_remote_code=True, revision=revision, torch_dtype=dtype,
    ).to(device=device)
    model.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
    print(f"Model is running on device: {device}")

    image = Image.open(args.image)
    # Encode the image ONCE and reuse the embedding everywhere below.
    # (The original encoded the same image twice — once for the warm-up
    # description and again for the Q&A path — doubling the vision-tower
    # forward pass.)
    image_embeds = model.encode_image(image)
    print(model.answer_question(image_embeds, "Describe this image.", tokenizer))

    if args.caption:
        print(model.caption(images=[image], tokenizer=tokenizer)[0])
    elif args.prompt is None:
        # Interactive chat: stream tokens as they are generated and keep
        # the running Q/A transcript as context for follow-up questions.
        chat_history = ""
        while True:
            question = input("> ")

            result_queue = Queue()
            streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)

            # Generation runs on a worker thread so the main thread can
            # consume the streamer as tokens arrive; the final answer is
            # handed back through result_queue.
            thread = Thread(
                target=model.answer_question,
                args=(image_embeds, question, tokenizer, chat_history),
                kwargs={"streamer": streamer, "result_queue": result_queue},
            )
            thread.start()

            # Hold back fragments that may be the start of a stop marker
            # ("<" or "END") so partial markers are not echoed mid-stream.
            buffer = ""
            for new_text in streamer:
                buffer += new_text
                if not new_text.endswith("<") and not new_text.endswith("END"):
                    print(buffer, end="", flush=True)
                    buffer = ""
            print(buffer)

            thread.join()

            answer = result_queue.get()
            chat_history += f"Question: {question}\n\nAnswer: {answer}\n\n"
    else:
        # Single-shot question supplied on the command line.
        print(">", args.prompt)
        answer = model.answer_question(image_embeds, args.prompt, tokenizer)
        print(answer)
