from pathlib import Path

import torch
import numpy as np
from functools import partial

from transformers import AutoModelForVision2Seq, AutoProcessor, PreTrainedModel
from transformers.processing_utils import ProcessorMixin
from PIL import Image

from .config_vit import build_vision_tower


def fake_img() -> Image.Image:
    """Return a synthetic 100x100 RGB image with a black letter "W" on white.

    Used as a deterministic placeholder input for the vision pipeline.
    """
    from PIL import Image, ImageDraw, ImageFont

    # Blank white canvas.
    canvas = Image.new("RGB", (100, 100), color=(255, 255, 255))
    drawer = ImageDraw.Draw(canvas)

    # Prefer a TrueType face; fall back to PIL's built-in bitmap font when
    # "arial.ttf" cannot be found on this system.
    try:
        face = ImageFont.truetype("arial.ttf", 80)
    except IOError:
        face = ImageFont.load_default()

    # Render the glyph near the top-left corner in black.
    drawer.text((10, 10), "W", font=face, fill=(0, 0, 0))

    return canvas


def load_hf_vla(model_path):
    """Load an HF vision-to-seq VLA checkpoint and compute a reference action.

    Returns a tuple of (model, processor, processed inputs, pixel values,
    reference action) so callers can compare later runs against the
    reference output.
    """
    proc = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForVision2Seq.from_pretrained(
        model_path,
        attn_implementation="sdpa",  # [Optional] Requires `flash_attn`
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        device_map="cuda",
    )

    # Disable KV caching on both the wrapper and the inner language model.
    model.config.use_cache = False
    model.language_model.config.use_cache = False

    dummy: Image.Image = fake_img()
    query = "In: What action should the robot take to {<INSTRUCTION>}?\nOut:"

    with torch.no_grad():
        batch = proc(query, dummy).to(device=model.device, dtype=torch.bfloat16)
        pixels = batch.pixel_values
        # NOTE(review): `predict_action` comes from the trust_remote_code
        # model class; "bridge_orig" presumably selects the un-normalization
        # statistics — confirm against the checkpoint's config.
        ref_action = model.predict_action(
            **batch, unnorm_key="bridge_orig", do_sample=False, use_cache=False
        )

    return model, proc, batch, pixels, ref_action


def build_vla(
    model_path: Path, use_new: bool = False
) -> tuple[
    ProcessorMixin,
    PreTrainedModel,
    dict[str, torch.Tensor],
    torch.Tensor,
    np.ndarray,
    # Fix: the function returns the 2-tuple (v1, v2), not a single module.
    tuple[torch.nn.Module, torch.nn.Module],
]:
    """Rebuild the VLA model's vision tower and verify behavioral equivalence.

    Loads the checkpoint via `load_hf_vla`, constructs replacement featurizer
    and projector modules from the originals' state dicts, swaps them into the
    model, then re-runs `predict_action` and checks the resulting action
    matches the reference computed before the swap.

    Args:
        model_path: Path to the HF checkpoint directory.
        use_new: When True, build with the "new" vision-tower config
            instead of "old".

    Returns:
        (processor, model, processed inputs, pixel values moved to CPU,
         reference action, the two replacement featurizer modules).

    Raises:
        AssertionError: if the rebuilt tower's action deviates from the
            reference beyond tolerance.
    """
    vla, processor, inputs, img, action_org = load_hf_vla(model_path)

    # Original modules whose weights seed the rebuilt tower.  Annotated as a
    # tuple: the original hint said `list` but the value is a fixed 3-tuple.
    vits: tuple[torch.nn.Module, ...] = (
        vla.vision_backbone.featurizer,
        vla.vision_backbone.fused_featurizer,
        vla.projector,
    )

    v1, v2, p = build_vision_tower(
        config_type=("new" if use_new else "old"),
        state=[m.state_dict() for m in vits],
        device=vla.device,
        dtype=vla.dtype,
    )

    vla.vision_backbone.featurizer = v1
    vla.vision_backbone.fused_featurizer = v2
    vla.projector = p

    vla = vla.cuda()
    with torch.no_grad():
        action = vla.predict_action(**inputs, unnorm_key="bridge_orig", do_sample=False)
        # Convert once and reuse for both the MSE check and assert_close.
        new_t, ref_t = (torch.from_numpy(a) for a in (action, action_org))
        diff = torch.nn.functional.mse_loss(new_t, ref_t).item()
        # Explicit raise instead of `assert`: still enforced under `python -O`.
        if diff > 1e-4:
            raise AssertionError(f"action MSE too large: {diff}")
        torch.testing.assert_close(new_t, ref_t)

    return processor, vla, inputs, img.cpu(), action_org, (v1, v2)


if __name__ == "__main__":
    # v1 = vit1()
    # v2 = vit2()
    # p = proj()
    pass
