{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Quick start guide"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Load the LLaVA model and processor from Hugging Face."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the LLaVA-1.5-7B checkpoint and its processor.\n",
    "import torch\n",
    "from transformers import AutoProcessor, LlavaForConditionalGeneration,LlavaProcessor\n",
    "\n",
    "# NOTE(review): hardcoded absolute checkpoint path — adjust to your environment.\n",
    "# bfloat16 + low_cpu_mem_usage reduce load-time memory; attn_implementation=\"eager\"\n",
    "# is presumably chosen so attention weights are available to the attention-map\n",
    "# methods used below — confirm against the method implementations.\n",
    "model = LlavaForConditionalGeneration.from_pretrained('/data/VLM/llava-v1.5-7b-hf', torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, attn_implementation=\"eager\").to('cuda')\n",
    "# patch_size=14 — presumably matches the vision tower's patch size; confirm\n",
    "# against the checkpoint's processor config.\n",
    "processor = LlavaProcessor.from_pretrained('/data/VLM/llava-v1.5-7b-hf', patch_size=14)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from PIL import Image\n",
    "\n",
    "def normalize_to_255(arr):\n",
    "    \"\"\"Min-max normalize an array into [0, 255] and cast to uint8.\n",
    "\n",
    "    A constant array (min == max) cannot be normalized without dividing\n",
    "    by zero, so it is mapped to a uniform mid-gray value of 128 instead.\n",
    "    \"\"\"\n",
    "    arr = np.asarray(arr)\n",
    "    # Compute the extrema once instead of re-scanning the array per use.\n",
    "    lo, hi = np.min(arr), np.max(arr)\n",
    "    if lo == hi:\n",
    "        return np.full_like(arr, 128, dtype=np.uint8)\n",
    "    # Scale to [0, 1], stretch to [0, 255]; the uint8 cast truncates fractions.\n",
    "    return ((arr - lo) / (hi - lo) * 255).astype(np.uint8)\n",
    "def stretch_array_to_image(arr, pil_img):\n",
    "    \"\"\"Resize a 2-D array to a PIL image's size using bilinear interpolation.\n",
    "\n",
    "    Vectorized NumPy implementation of the original per-pixel double loop:\n",
    "    each target pixel (x, y) maps back to source coordinates\n",
    "    (x * src_w / tgt_w, y * src_h / tgt_h) and is interpolated from the\n",
    "    four surrounding source pixels (clamped at the array borders).\n",
    "\n",
    "    Args:\n",
    "        arr: 2-D array of shape (height, width) to resample.\n",
    "        pil_img: PIL image whose size defines the target resolution.\n",
    "\n",
    "    Returns:\n",
    "        PIL.Image.Image of the resampled data, in arr's original dtype.\n",
    "    \"\"\"\n",
    "    src_height, src_width = arr.shape\n",
    "    target_width, target_height = pil_img.size\n",
    "\n",
    "    # Source coordinate of every target column / row.\n",
    "    src_x = np.arange(target_width) * (src_width / target_width)\n",
    "    src_y = np.arange(target_height) * (src_height / target_height)\n",
    "\n",
    "    # Integer corners of each interpolation cell, clamped to the last index.\n",
    "    x0 = src_x.astype(int)\n",
    "    y0 = src_y.astype(int)\n",
    "    x1 = np.minimum(x0 + 1, src_width - 1)\n",
    "    y1 = np.minimum(y0 + 1, src_height - 1)\n",
    "\n",
    "    # Fractional offsets, broadcast to a (target_height, target_width) grid.\n",
    "    fx = (src_x - x0)[np.newaxis, :]\n",
    "    fy = (src_y - y0)[:, np.newaxis]\n",
    "\n",
    "    # Bilinear blend of the four neighbors; np.ix_ builds the 2-D index mesh.\n",
    "    arr_f = arr.astype(np.float64)\n",
    "    value = ((1 - fx) * (1 - fy) * arr_f[np.ix_(y0, x0)]\n",
    "             + fx * (1 - fy) * arr_f[np.ix_(y0, x1)]\n",
    "             + (1 - fx) * fy * arr_f[np.ix_(y1, x0)]\n",
    "             + fx * fy * arr_f[np.ix_(y1, x1)])\n",
    "\n",
    "    # Cast back to the source dtype (truncating, matching the original loop's\n",
    "    # element-wise assignment into a same-dtype array) and wrap as an image.\n",
    "    return Image.fromarray(value.astype(arr.dtype))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Run the model on one example from TextVQA."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Demo: ViCrop QA on a TextVQA example.\n",
    "from PIL import Image, ImageDraw\n",
    "from run import vicrop_qa\n",
    "\n",
    "model_name = 'llava'\n",
    "method_name = 'rel_att'  # presumably the relative-attention method (cf. rel_attention_llava)\n",
    "image_path = 'images/demo1.png'\n",
    "question = 'what is the date of the photo?'\n",
    "short_question = 'what is the date of the photo?'  # same as `question` in this example\n",
    "\n",
    "# Run the Vicrop method\n",
    "ori_answer, crop_answer, bbox = vicrop_qa(model_name, method_name, image_path, question, model, processor, short_question)\n",
    "\n",
    "print(f'Model\\'s original answer:  {ori_answer}')\n",
    "print(f'Answer with Vicrop:       {crop_answer}')\n",
    "\n",
    "# Visualize the bounding box\n",
    "image = Image.open(image_path).convert(\"RGB\")\n",
    "image_draw = ImageDraw.Draw(image)\n",
    "image_draw.rectangle(bbox, outline='red', width=4)\n",
    "# Display at 500 px wide, preserving the aspect ratio.\n",
    "display(image.resize((500, 500*image.size[1]//image.size[0])))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Visualize the attention map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "# NOTE(review): wildcard import — `rel_attention_llava` presumably comes from here.\n",
    "from llava_methods import *\n",
    "\n",
    "question = 'what is the date of the photo?'\n",
    "general_question = 'Write a general description of the image.'\n",
    "\n",
    "# LLaVA-1.5 chat template with the <image> placeholder token.\n",
    "prompt = f\"<image>\\nUSER: {question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "general_prompt = f\"<image>\\nUSER: {general_question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "\n",
    "# Attention map of the question prompt relative to the general prompt.\n",
    "# NOTE(review): `image` comes from the previous cell and already carries the\n",
    "# red bounding box drawn there — confirm that is intended.\n",
    "att_map = rel_attention_llava(image, prompt, general_prompt, model, processor)\n",
    "att_map=normalize_to_255(att_map)\n",
    "result=stretch_array_to_image(att_map, image)\n",
    "# Shows the annotated source image; `result` is displayed in the next cell.\n",
    "image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display the attention-map visualization computed in the previous cell.\n",
    "result"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Another example on a high-resolution image (from the V* benchmark)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Demo: ViCrop QA on a high-resolution V* benchmark example.\n",
    "from PIL import Image, ImageDraw\n",
    "from run import vicrop_qa\n",
    "\n",
    "model_name = 'llava'\n",
    "method_name = 'rel_att_high'  # presumably the high-resolution variant of rel_att (cf. high_res)\n",
    "image_path = 'images/demo2.png'\n",
    "question = \"What is the pose of the woman with yellow backpack?\\n(A) walking\\n(B) running\\n(C) squatting\\n(D) standing\\nAnswer with the option's letter from the given choices directly.\"\n",
    "short_question = \"What is the pose of the woman with yellow backpack?\\n(A) walking\\n(B) running\\n(C) squatting\\n(D) standing\\nAnswer with the option's letter from the given choices directly.\"\n",
    "\n",
    "# Run the Vicrop method\n",
    "ori_answer, crop_answer, bbox = vicrop_qa(model_name, method_name, image_path, question, model, processor, short_question)\n",
    "\n",
    "print(f'Model\\'s original answer:  {ori_answer}')\n",
    "print(f'Answer with Vicrop:       {crop_answer}')\n",
    "\n",
    "# Visualize the bounding box\n",
    "image = Image.open(image_path).convert(\"RGB\")\n",
    "image_draw = ImageDraw.Draw(image)\n",
    "image_draw.rectangle(bbox, outline='red', width=4)\n",
    "# Display at 1000 px wide, preserving the aspect ratio.\n",
    "display(image.resize((1000, 1000*image.size[1]//image.size[0])))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Visualize the attention map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "# NOTE(review): wildcard imports — `rel_attention_llava` and `high_res`\n",
    "# presumably come from these modules; confirm.\n",
    "from llava_methods import *\n",
    "from utils import *\n",
    "\n",
    "question = \"What is the pose of the woman with yellow backpack?\\n(A) walking\\n(B) running\\n(C) squatting\\n(D) standing\\nAnswer with the option's letter from the given choices directly.\"\n",
    "# Define the baseline question locally so this cell does not depend on hidden\n",
    "# state from the earlier visualization cell (identical string value).\n",
    "general_question = 'Write a general description of the image.'\n",
    "\n",
    "# LLaVA-1.5 chat template with the <image> placeholder token.\n",
    "prompt = f\"<image>\\nUSER: {question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "general_prompt = f\"<image>\\nUSER: {general_question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "\n",
    "# High-resolution relative-attention map.\n",
    "# NOTE(review): `image` comes from the previous cell and already carries the\n",
    "# red bounding box drawn there — confirm that is intended.\n",
    "att_map = high_res(rel_attention_llava, image, prompt, general_prompt, model, processor)\n",
    "att_map = normalize_to_255(att_map)\n",
    "result = stretch_array_to_image(att_map, image)\n",
    "result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch example on a local image.\n",
    "question = \"Describe the image.\"\n",
    "short_question = \"Find the location of the car.\"  # unused in this cell\n",
    "# NOTE(review): hardcoded absolute local path — replace with a path that\n",
    "# exists on your machine before running.\n",
    "image_path=\"/home/user/Pictures/1.jpeg\"\n",
    "image = Image.open(image_path).convert(\"RGB\")\n",
    "prompt = f\"<image>\\nUSER: {question}\\nASSISTANT:\"\n",
    "# NOTE(review): relies on `general_question`, `high_res`, and\n",
    "# `rel_attention_llava` defined by earlier cells (hidden cross-cell state).\n",
    "general_prompt = f\"<image>\\nUSER: {general_question}\\nASSISTANT:\"\n",
    "\n",
    "att_map = high_res(rel_attention_llava, image, prompt, general_prompt, model, processor)\n",
    "\n",
    "att_map=normalize_to_255(att_map)\n",
    "result=stretch_array_to_image(att_map, image)\n",
    "result"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llava",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
