import argparse
import torch
import gradio as gr
from moondream import detect_device, LATEST_REVISION
from threading import Thread
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM 
from PIL import ImageDraw
import re
from torchvision.transforms.v2 import Resize
import json
import os
import time


from llavamodel.model.builder import load_pretrained_model
from llavamodel.mm_utils import process_images, load_image_from_base64, tokenizer_image_token
from llavamodel.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from transformers import TextIteratorStreamer
from llavamodel.conversation import (default_conversation, conv_templates, SeparatorStyle)
import hashlib
import datetime
from llavamodel.constants import LOGDIR
from llavamodel.utils import (build_logger, violates_moderation, moderation_msg, server_error_msg)

logger = build_logger("gradio_web_server", "gradio_web_server.log")


def get_conv_log_filename():
    """Return the path of today's conversation log (LOGDIR/YYYY-MM-DD-conv.json)."""
    today = datetime.datetime.now()
    date_stamp = "%d-%02d-%02d" % (today.year, today.month, today.day)
    return os.path.join(LOGDIR, date_stamp + "-conv.json")

def add_text(state, text, image, image_process_mode):
    """Append the user's turn to the conversation state.

    Gradio callback for textbox submit / Send click.  Returns a 4-tuple that
    Gradio maps to (state, chatbot, textbox, imagebox); the textbox is cleared
    and the image box reset on every call.
    """
    if state is None:
        # BUG FIX: gr.State() initializes to None.  Previously `state` was only
        # created when an image was attached, so the very first text-only
        # submission crashed with AttributeError on `state.skip_next`.
        state = default_conversation.copy()
    if len(text) <= 0 and image is None:
        # Nothing to send; mark the follow-up http_bot call as a no-op.
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None)

    moderate = False  # set True to screen inputs via the moderation endpoint
    if moderate:
        flagged = violates_moderation(text)
        if flagged:
            state.skip_next = True
            return (state, state.to_gradio_chatbot(), moderation_msg, None)

    text = text[:1536]  # Hard cut-off
    if image is not None:
        text = text[:1200]  # Hard cut-off for images
        if '<image>' not in text:
            # text = '<Image><image></Image>' + text
            text = text + '\n<image>'
        text = (text, image, image_process_mode)
        # A new image starts a fresh conversation.
        state = default_conversation.copy()
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)  # placeholder for the reply
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None)


def http_bot(state, model_selector, temperature, top_p, max_new_tokens):
    """Drive one chat turn as a Gradio generator callback.

    On the first round of a conversation, selects a prompt template based on
    the model name.  Saves any uploaded images under LOGDIR/serve_images,
    streams tokens from generate_stream() into the last chatbot message
    (yielding (state, chatbot_messages) pairs so Gradio re-renders
    incrementally), and finally appends a JSON record of the finished turn to
    the daily conversation log.
    """
    start_tstamp = time.time()
    model_name = model_selector

    if state.skip_next:
        # This generate call is skipped due to invalid inputs
        yield (state, state.to_gradio_chatbot())
        return

    if len(state.messages) == state.offset + 2:
        # First round of conversation: pick the conversation template that
        # matches the model family (separator style / system prompt differ).
        if "llava" in model_name.lower():
            if 'llama-2' in model_name.lower():
                template_name = "llava_llama_2"
            elif "mistral" in model_name.lower() or "mixtral" in model_name.lower():
                if 'orca' in model_name.lower():
                    template_name = "mistral_orca"
                elif 'hermes' in model_name.lower():
                    template_name = "chatml_direct"
                else:
                    template_name = "mistral_instruct"
            elif 'llava-v1.6-34b' in model_name.lower():
                template_name = "chatml_direct"
            elif "v1" in model_name.lower():
                if 'mmtag' in model_name.lower():
                    template_name = "v1_mmtag"
                elif 'plain' in model_name.lower() and 'finetune' not in model_name.lower():
                    template_name = "v1_mmtag"
                else:
                    template_name = "llava_v1"
            elif "mpt" in model_name.lower():
                template_name = "mpt"
            else:
                if 'mmtag' in model_name.lower():
                    template_name = "v0_mmtag"
                elif 'plain' in model_name.lower() and 'finetune' not in model_name.lower():
                    template_name = "v0_mmtag"
                else:
                    template_name = "llava_v0"
        elif "mpt" in model_name:
            template_name = "mpt_text"
        elif "llama-2" in model_name:
            template_name = "llama_2"
        else:
            template_name = "vicuna_v1"
        # Re-seed a fresh conversation from the template, carrying over only
        # the latest user message (state.messages[-2]).
        new_state = conv_templates[template_name].copy()
        new_state.append_message(new_state.roles[0], state.messages[-2][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state

    # Construct prompt
    prompt = state.get_prompt()

    # Persist each uploaded image once, keyed by MD5 of its pixel bytes,
    # under a per-day directory.
    all_images = state.get_images(return_pil=True)
    all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
    # NOTE(review): loop variable `hash` shadows the builtin of the same name.
    for image, hash in zip(all_images, all_image_hash):
        t = datetime.datetime.now()
        filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
        if not os.path.isfile(filename):
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            image.save(filename)

    # Make requests
    pload = {
        "model": model_name,
        "prompt": prompt,
        "temperature": float(temperature),
        "top_p": float(top_p),
        "max_new_tokens": min(int(max_new_tokens), 1536),
        "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
        # Placeholder summary so the log line below stays small.
        "images": f'List of {len(state.get_images())} images: {all_image_hash}',
    }
    logger.info(f"==== request ====\n{pload}")

    # Swap the summary for the real (base64) image payloads after logging.
    pload['images'] = state.get_images()

    # Show a typing-cursor placeholder while generation starts.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot())

    # Stream output
    response = generate_stream(pload)
    # import pdb;pdb.set_trace()
    for data in response:
        if data["error_code"] == 0:
            # Strip the echoed prompt; append the cursor while streaming.
            output = data["text"][len(prompt):].strip()
            state.messages[-1][-1] = output + "▌"
            yield (state, state.to_gradio_chatbot())
        else:
            output = data["text"] + f" (error_code: {data['error_code']})"
            state.messages[-1][-1] = output
            yield (state, state.to_gradio_chatbot())
            return
        time.sleep(0.03)


    # Drop the trailing "▌" cursor from the finished message.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot())

    finish_tstamp = time.time()
    logger.info(f"{output}")

    # Append one JSON line per finished turn to today's conversation log.
    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "tstamp": round(finish_tstamp, 4),
            "type": "chat",
            "model": model_name,
            "start": round(start_tstamp, 4),
            "finish": round(finish_tstamp, 4),
            "state": state.dict(),
            "images": all_image_hash,
            # "ip": request.client.host,
        }
        fout.write(json.dumps(data) + "\n")

# @torch.inference_mode()
def generate_stream(params):
    """Generate a streamed completion for one request.

    params keys: "model" (repo short name), "prompt", optional "temperature",
    "top_p", "max_new_tokens", "stop" (stop string or None) and "images"
    (list of base64-encoded images matching the <image> tokens in the prompt).

    Yields dicts of the form {"text": <prompt + partial output>,
    "error_code": 0} as tokens arrive.

    Raises ValueError when the image count does not match the number of
    <image> tokens in the prompt.
    """
    model_path = "liuhaotian/" + params["model"]
    model_base = None
    load_8bit = False
    load_4bit = False
    use_flash_attn = False
    device, dtype = detect_device()

    # NOTE(review): the model is reloaded from disk on every request, which is
    # very slow; consider caching the loaded model across calls.
    tokenizer, model, image_processor, context_len = load_pretrained_model(
            model_path, model_base, params["model"], load_8bit, load_4bit, device=device, use_flash_attn=use_flash_attn)

    prompt = params["prompt"]
    ori_prompt = prompt
    images = params.get("images", None)
    num_image_tokens = 0
    if images is not None and len(images) > 0:
        if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
            raise ValueError("Number of images does not match number of <image> tokens in prompt")

        images = [load_image_from_base64(image) for image in images]
        image_sizes = [image.size for image in images]
        images = process_images(images, image_processor, model.config)

        if type(images) is list:
            images = [image.to(model.device, dtype=torch.float16) for image in images]
        else:
            images = images.to(model.device, dtype=torch.float16)

        # Optionally wrap each image token with start/end markers, then count
        # how many context positions the vision patches will consume.
        replace_token = DEFAULT_IMAGE_TOKEN
        if getattr(model.config, 'mm_use_im_start_end', False):
            replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
        num_image_tokens = prompt.count(replace_token) * model.get_vision_tower().num_patches

        image_args = {"images": images, "image_sizes": image_sizes}
    else:
        images = None
        image_args = {}

    temperature = float(params.get("temperature", 1.0))
    top_p = float(params.get("top_p", 1.0))
    max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
    max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
    stop_str = params.get("stop", None)
    do_sample = temperature > 0.001  # greedy decoding at (near-)zero temperature

    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(device)
    # stopping_criteria = KeywordsStoppingCriteria([stop_str], tokenizer, input_ids)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)

    # Reserve context-window room for the prompt tokens and the image patches.
    max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens)

    if max_new_tokens < 1:
        # BUG FIX: this path used to yield a NUL-terminated JSON *bytes*
        # payload while the normal path yields plain dicts; the consumer
        # (http_bot) subscripts each item as a dict, so the bytes form crashed
        # it.  Yield the same dict shape as the streaming path.
        yield {"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.",
               "error_code": 0}
        return

    # Run generation on a worker thread; the streamer feeds tokens back here.
    thread = Thread(target=model.generate, kwargs=dict(
        inputs=input_ids,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        streamer=streamer,
        use_cache=True,
        **image_args
    ))
    thread.start()

    generated_text = ori_prompt
    for new_text in streamer:
        generated_text += new_text
        # Trim the stop string if the model emitted it (stop may be None).
        if stop_str and generated_text.endswith(stop_str):
            generated_text = generated_text[:-len(stop_str)]
        yield {"text": generated_text, "error_code": 0}
    thread.join()
        

# Page header rendered at the top of the demo (see build_demo).
title_markdown = ("""
# 🌋 LLaVA: Large Language and Vision Assistant
[[Project Page](https://llava-vl.github.io)] [[Code](https://github.com/haotian-liu/LLaVA)] [[Model](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)] | 📚 [[LLaVA](https://arxiv.org/abs/2304.08485)] [[LLaVA-v1.5](https://arxiv.org/abs/2310.03744)] [[LLaVA-v1.6](https://llava-vl.github.io/blog/2024-01-30-llava-1-6/)]
""")

# Terms-of-use notice.  NOTE(review): not referenced anywhere in this file;
# presumably kept for parity with the upstream LLaVA demo.
tos_markdown = ("""
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
""")


# License footer.  NOTE(review): also not referenced anywhere in this file.
learn_more_markdown = ("""
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
""")

# Extra CSS injected into the Gradio Blocks page (see build_demo).
block_css = """

#buttons button {
    min-width: min(120px,100%);
}

"""

def build_demo(cur_dir=None):
    """Build and return the Gradio Blocks UI for the chat demo.

    Layout: a left column with model selector, image upload and sampling
    parameters; a right column with the chatbot and the input textbox.
    Both ENTER in the textbox and the Send button run add_text followed by
    the streaming http_bot callback.

    cur_dir is currently unused; it fed the commented-out example gallery.
    NOTE(review): reads the module-level `models` list, which is only defined
    in the __main__ block — calling build_demo() from an import would fail.
    """
    textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
    with gr.Blocks(title="LLaVA", theme=gr.themes.Default(), css=block_css) as demo:
        # Per-session conversation state (starts as None; see add_text).
        state = gr.State()

        gr.Markdown(title_markdown)

        with gr.Row():
            with gr.Column(scale=3):
                with gr.Row(elem_id="model_selector_row"):
                    model_selector = gr.Dropdown(
                        choices=models,
                        value=models[0] if len(models) > 0 else "",
                        interactive=True,
                        show_label=False,
                        container=False)

                imagebox = gr.Image(type="pil")
                image_process_mode = gr.Radio(
                    ["Crop", "Resize", "Pad", "Default"],
                    value="Default",
                    label="Preprocess for non-square image", visible=False)

                # if cur_dir is None:
                #     cur_dir = os.path.dirname(os.path.abspath(__file__))
                # gr.Examples(examples=[
                #     [f"{cur_dir}/examples/extreme_ironing.jpg", "What is unusual about this image?"],
                #     [f"{cur_dir}/examples/waterview.jpg", "What are the things I should be cautious about when I visit here?"],
                # ], inputs=[imagebox, textbox])

                # Sampling parameters forwarded to http_bot.
                with gr.Accordion("Parameters", open=False) as parameter_row:
                    temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
                    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
                    max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)

            with gr.Column(scale=8):
                chatbot = gr.Chatbot(
                    elem_id="chatbot",
                    label="LLaVA Chatbot",
                    height=650,
                    layout="panel",
                )
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox.render()
                    with gr.Column(scale=1, min_width=50):
                        submit_btn = gr.Button(value="Send", variant="primary")
                # with gr.Row(elem_id="buttons") as button_row: 
                #     upvote_btn = gr.Button(value="👍  Upvote", interactive=False)
                #     downvote_btn = gr.Button(value="👎  Downvote", interactive=False)
                #     flag_btn = gr.Button(value="⚠️  Flag", interactive=False)
                #     #stop_btn = gr.Button(value="⏹️  Stop Generation", interactive=False)
                #     regenerate_btn = gr.Button(value="🔄  Regenerate", interactive=False)
                #     clear_btn = gr.Button(value="🗑️  Clear", interactive=False)


        url_params = gr.JSON(visible=False)

        # Wire both submission paths (ENTER and Send) to the same pipeline:
        # add_text appends the turn, then http_bot streams the reply.
        textbox.submit(
                add_text,
                [state, textbox, imagebox, image_process_mode],
                [state, chatbot, textbox, imagebox],
                queue=False
            ).then(
                http_bot,
                [state, model_selector, temperature, top_p, max_output_tokens],
                [state, chatbot],
            )
        
        submit_btn.click(
                add_text,
                [state, textbox, imagebox, image_process_mode],
                [state, chatbot, textbox, imagebox]
            ).then(
                http_bot,
                [state, model_selector, temperature, top_p, max_output_tokens],
                [state, chatbot],
            )
        logger.info(f"chatbot: {chatbot}")
        
    return demo

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--port", type=int)
    parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
    parser.add_argument("--concurrency-count", type=int, default=16)
    parser.add_argument("--model-list-mode", type=str, default="once",
        choices=["once", "reload"])
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--moderate", action="store_true")
    args = parser.parse_args()
    # Log the parsed args once (previously logged twice on consecutive lines).
    logger.info(f"args: {args}")

    # Model choices shown in the UI dropdown (read by build_demo).
    models = ["llava-v1.5-7b", "llava-v1.6-34b", "llava-v1.5-13b"]

    demo = build_demo()
    # BUG FIX: --host/--port/--share were parsed but never used; pass them to
    # launch() so the CLI options actually take effect.
    demo.queue().launch(
        server_name=args.host,
        server_port=args.port,
        share=args.share,
        debug=True,
    )




    # parser = argparse.ArgumentParser()
    # parser.add_argument("--cpu", action="store_true")
    # args = parser.parse_args()

    # if args.cpu:
    #     device = torch.device("cpu")
    #     dtype = torch.float32
    # else:
    #     device, dtype = detect_device()
    #     if device != torch.device("cpu"):
    #         print("Using device:", device)
    #         print("If you run into issues, pass the `--cpu` flag to this script.")
    #         print()

    # 

    # model_path = "liuhaotian/llava-v1.5-7b"
    # tokenizer_path="llava-hf/llava-1.5-7b-hf"
    # model_name = "llava"
    # model_base = None
    # load_8bit = False
    # load_4bit = True
    # use_flash_attn = False
# with gr.Blocks() as demo:
#     gr.Markdown(
#         """
#         # 🌔 llavamodel
#         """
#     )
#     with gr.Row():
#         prompt = gr.Textbox(label="Input Prompt", placeholder="Type here...", scale=4)
#         submit = gr.Button("Submit")
#     with gr.Row():
#         img = gr.Image(type="pil", label="Upload an Image")
#         with gr.Column():
#             output = gr.Markdown(label="Response")
#             ann = gr.Image(visible=False, label="Annotated Image")

#     submit.click(answer_question, [img, prompt], output)
#     prompt.submit(answer_question, [img, prompt], output)
#     output.change(process_answer, [img, output], ann, show_progress=False)

# demo.queue().launch(debug=True)

# $env:HF_ENDPOINT = "https://hf-mirror.com"
# huggingface-cli download --resume-download liuhaotian/llava-v1.5-7b