import gradio as gr
import pandas as pd
from hub_utils import check_for_discussion, report_results
from model_utils import calculate_memory, get_model
from huggingface_hub.utils import HfHubHTTPError


# Module-level holder for the most recently loaded model. Gradio button callbacks
# can't easily receive arbitrary Python objects as inputs, so get_results() stashes
# the loaded model here instead of threading it through component state.
MODEL = None


def get_results(model_name: str, library: str, options: list, access_token: str):
    """Load a model, compute its memory footprint, and return UI updates.

    Args:
        model_name: Hub repo id or URL of the model to analyze.
        library: Source framework ("auto", "transformers", or "timm").
        options: Selected precision options (e.g. ["float32", "int8"]).
        access_token: Optional API token for gated repositories.

    Returns:
        A three-element list: the results title markdown, an update making the
        DataFrame visible with the memory table, and an update toggling the
        "report results" button (hidden when a discussion already exists).
    """
    global MODEL
    MODEL = get_model(model_name, library, access_token)

    # If the discussion check itself fails, err on the side of assuming one
    # exists so we don't offer to open a duplicate report.
    try:
        discussion_exists = check_for_discussion(model_name)
    except HfHubHTTPError:
        discussion_exists = True

    memory_table = calculate_memory(MODEL, options)
    title = f"## Memory usage for '{model_name}'"
    return [
        title,
        gr.update(visible=True, value=pd.DataFrame(memory_table)),
        gr.update(visible=not discussion_exists),
    ]


# UI layout: intro text, results area (hidden until first calculation),
# input controls, and the two action buttons.
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown(
            """<h1>Model Memory Calculator</h1>

    This tool will help you calculate how much vRAM is needed to train and perform big model inference
    on a model hosted on the Gitee - AI. The minimum recommended vRAM needed for a model
    is denoted as the size of the "largest layer", and training of a model is roughly 4x its size (for Adam).

    These calculations are accurate within a few percent at most, such as `bert-base-cased` being 413.68 MB and the calculator estimating 413.18 MB.

    When performing inference, expect to add up to an additional 20% to this as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). 
    More tests will be performed in the future to get a more accurate benchmark for each model.

    Currently this tool supports all models hosted that use `transformers` and `timm`.

    To use this tool pass in the URL or model name of the model you want to calculate the memory usage for,
    select which framework it originates from ("auto" will try and detect it from the model metadata), and
    what precisions you want to use.

    该工具将帮助您计算在 Gitee - AI 托管的模型上训练和执行大型模型推理需要多少 vRAM

    对托管在 Gitee - AI 上的模型进行训练和执行大型模型推理所需的 vRAM

    模型所需的最小推荐 vRAM 为 "最大层 "的大小，而模型的训练大约是其大小的 4 倍（对于 Adam）

    这些计算结果最多只能精确到百分之几，例如 "bert-base-cased "为 413.68 MB，而计算器估计为 413.18 MB

    在进行推理时，预计会像 [EleutherAI](https://blog.eleuther.ai/transformer-math/) 所发现的那样，在此基础上再增加 20%。
    今后将进行更多测试，以便为每个模型制定更准确的基准

    目前，该工具支持托管的所有使用 "transformers "和 "timm "的模型

    要使用此工具，请输入要计算内存使用量的模型的 URL 或模型名称、选择该模型来自哪个框架（"auto "将尝试从模型元数据中检测），以及希望使用的精度

    <strong>注意！！！由于Gitee - AI 上的仓库不允许匿名访问，使用前你需要通过以下链接生成一个自己的私人令牌</strong>

    Gitee 私人令牌：https://gitee.com/profile/personal_access_tokens

    临时使用 Gitee OpenAPI：https://gitee.com/api/v5/swagger 申请授权
    """
        )
        # Results: title markdown plus the memory table. The table starts hidden
        # and is revealed by get_results() once a calculation succeeds.
        out_text = gr.Markdown()
        out = gr.DataFrame(
            headers=["dtype", "Largest Layer", "Total Size", "Training using Adam"],
            interactive=False,
            visible=False,
        )
        with gr.Row():
            inp = gr.Textbox(label="Model Name or URL", value="google/fnet-base")
        with gr.Row():
            library = gr.Radio(["auto", "transformers", "timm"], label="Library", value="auto")
            options = gr.CheckboxGroup(
                ["float32", "float16/bfloat16", "int8", "int4"],
                value="float32",
                label="Model Precision",
            )
            access_token = gr.Textbox(label="API Token", placeholder="Optional (for gated models)")
        with gr.Row():
            btn = gr.Button("Calculate Memory Usage")
            # Hidden until get_results() determines no discussion exists yet.
            post_to_hub = gr.Button(
                value="Report results in this model repo's discussions!\n(Will open in a new tab)", visible=False
            )

    btn.click(
        get_results,
        inputs=[inp, library, options, access_token],
        outputs=[out_text, out, post_to_hub],
    )

    # Hide the report button immediately (prevents double-posting), then file the
    # report. Use gr.update(...) for consistency with get_results() — the
    # Component.update classmethod form is deprecated/removed in newer Gradio.
    post_to_hub.click(lambda: gr.update(visible=False), outputs=post_to_hub).then(
        report_results, inputs=[inp, library, access_token]
    )


demo.launch()
