{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6ff5c40a-f36d-4726-b488-e21d3d043aba",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-12-23T08:16:11.614985Z",
     "iopub.status.busy": "2024-12-23T08:16:11.614627Z",
     "iopub.status.idle": "2024-12-23T08:16:14.405365Z",
     "shell.execute_reply": "2024-12-23T08:16:14.404753Z",
     "shell.execute_reply.started": "2024-12-23T08:16:11.614942Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor\n",
    "from qwen_vl_utils import process_vision_info\n",
    "from modelscope import snapshot_download\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0c3f4255-3432-4fcf-b89e-b2b28dad968e",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-12-23T08:16:16.561574Z",
     "iopub.status.busy": "2024-12-23T08:16:16.561097Z",
     "iopub.status.idle": "2024-12-23T08:16:16.564652Z",
     "shell.execute_reply": "2024-12-23T08:16:16.563837Z",
     "shell.execute_reply.started": "2024-12-23T08:16:16.561548Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Path to the locally downloaded Qwen2-VL-7B-Instruct checkpoint\n",
    "model_directory = \"/mnt/workspace/models/Qwen2-VL-7B-Instruct\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "205e5ad4-162a-42e5-8428-06e9e068fe57",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-12-23T08:16:17.502302Z",
     "iopub.status.busy": "2024-12-23T08:16:17.501934Z",
     "iopub.status.idle": "2024-12-23T08:16:31.177048Z",
     "shell.execute_reply": "2024-12-23T08:16:31.176489Z",
     "shell.execute_reply.started": "2024-12-23T08:16:17.502276Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`Qwen2VLRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. All other arguments will be removed in v4.46\n",
      "Loading checkpoint shards: 100%|██████████| 5/5 [00:12<00:00,  2.47s/it]\n",
      "Some parameters are on the meta device because they were offloaded to the cpu.\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    # Load the pretrained model; device_map=\"auto\" lets accelerate place layers\n",
    "    # on the available devices (GPU, with CPU offload if needed).\n",
    "    model = Qwen2VLForConditionalGeneration.from_pretrained(\n",
    "        model_directory, torch_dtype=torch.float16, device_map=\"auto\"\n",
    "    )\n",
    "    # The processor bundles the tokenizer with the image preprocessor.\n",
    "    processor = AutoProcessor.from_pretrained(model_directory)\n",
    "except Exception as e:\n",
    "    print(f\"加载模型时出错: {e}\")\n",
    "    raise"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0ec42c46-239c-4750-9141-0942ae8b69b1",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-12-23T08:17:49.918414Z",
     "iopub.status.busy": "2024-12-23T08:17:49.918033Z",
     "iopub.status.idle": "2024-12-23T08:17:49.922512Z",
     "shell.execute_reply": "2024-12-23T08:17:49.921970Z",
     "shell.execute_reply.started": "2024-12-23T08:17:49.918390Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\n",
    "        \"role\": \"user\",\n",
    "        \"content\": [\n",
    "            {\n",
    "                \"type\": \"image\",\n",
    "                # URL\n",
    "                # \"image\": \"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg\",\n",
    "                # Absolute path\n",
    "                # \"image\": \"/mnt/workspace/datasets/doraemon-bot/train/images/009c152a-ade3-42d4-8d45-b5031b847b05-30-0.jpg\",\n",
    "                # Relative path\n",
    "                \"image\": \"../../datasets/doraemon-bot/train/images/009c152a-ade3-42d4-8d45-b5031b847b05-30-0.jpg\",\n",
    "            },\n",
    "            # NOTE: the old \"Picture 1: <image>\\n\" prefix was removed. Qwen2-VL's chat\n",
    "            # template already inserts the vision placeholder tokens from the image\n",
    "            # entry above, so a hand-written <image> tag leaked into the prompt as\n",
    "            # literal text (it was visible verbatim in the rendered template output).\n",
    "            {\"type\": \"text\", \"text\": \"你是一个电商领域识图专家,可以理解消费者上传的软件截图或实物拍摄图。现在,请你对消费者上传的图片进行分类。你只需要回答图片分类结果,不需要其他多余的话。以下是可以参考的分类标签,分类标签:[\\\"实物拍摄(含售后)\\\",\\\"商品分类选项\\\",\\\"商品头图\\\",\\\"商品详情页截图\\\",\\\"下单过程中出现异常（显示购买失败浮窗）\\\",\\\"订单详情页面\\\",\\\"支付页面\\\",\\\"消费者与客服聊天页面\\\",\\\"评论区截图页面\\\",\\\"物流页面-物流列表页面\\\",\\\"物流页面-物流跟踪页面\\\",\\\"物流页面-物流异常页面\\\",\\\"退款页面\\\",\\\"退货页面\\\",\\\"换货页面\\\",\\\"购物车页面\\\",\\\"店铺页面\\\",\\\"活动页面\\\",\\\"优惠券领取页面\\\",\\\"账单/账户页面\\\",\\\"个人信息页面\\\",\\\"投诉举报页面\\\",\\\"平台介入页面\\\",\\\"外部APP截图\\\",\\\"其他类别图片\\\"]。\"},\n",
    "        ],\n",
    "    }\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a57110bf-6eba-4334-804b-1e313fee81bb",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-12-23T08:17:50.541977Z",
     "iopub.status.busy": "2024-12-23T08:17:50.541635Z",
     "iopub.status.idle": "2024-12-23T08:17:50.562350Z",
     "shell.execute_reply": "2024-12-23T08:17:50.561681Z",
     "shell.execute_reply.started": "2024-12-23T08:17:50.541954Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n<|im_start|>user\\n<|vision_start|><|image_pad|><|vision_end|>Picture 1: <image>\\n你是一个电商领域识图专家,可以理解消费者上传的软件截图或实物拍摄图。现在,请你对消费者上传的图片进行分类。你只需要回答图片分类结果,不需要其他多余的话。以下是可以参考的分类标签,分类标签:[\"实物拍摄(含售后)\",\"商品分类选项\",\"商品头图\",\"商品详情页截图\",\"下单过程中出现异常（显示购买失败浮窗）\",\"订单详情页面\",\"支付页面\",\"消费者与客服聊天页面\",\"评论区截图页面\",\"物流页面-物流列表页面\",\"物流页面-物流跟踪页面\",\"物流页面-物流异常页面\",\"退款页面\",\"退货页面\",\"换货页面\",\"购物车页面\",\"店铺页面\",\"活动页面\",\"优惠券领取页面\",\"账单/账户页面\",\"个人信息页面\",\"投诉举报页面\",\"平台介入页面\",\"外部APP截图\",\"其他类别图片\"]。<|im_end|>\\n<|im_start|>assistant\\n'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Render the chat template (untokenized) and display the resulting prompt string.\n",
    "text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2f37d5e3-1992-43a5-95aa-3a457b4626c0",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-12-23T08:17:56.941184Z",
     "iopub.status.busy": "2024-12-23T08:17:56.940826Z",
     "iopub.status.idle": "2024-12-23T08:18:09.827206Z",
     "shell.execute_reply": "2024-12-23T08:18:09.826618Z",
     "shell.execute_reply.started": "2024-12-23T08:17:56.941156Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['实物拍摄(含售后)']\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    # Render the chat template into a single prompt string for inference.\n",
    "    text_template = processor.apply_chat_template(\n",
    "        messages, tokenize=False, add_generation_prompt=True\n",
    "    )\n",
    "\n",
    "    # Extract the image/video inputs referenced by the messages.\n",
    "    image_inputs, video_inputs = process_vision_info(messages)\n",
    "\n",
    "    # Tokenize the text and preprocess the visuals into model-ready tensors.\n",
    "    inputs = processor(\n",
    "        text=[text_template],\n",
    "        images=image_inputs,\n",
    "        videos=video_inputs,\n",
    "        padding=True,\n",
    "        return_tensors=\"pt\",\n",
    "    ).to(model.device)  # follow the model's device; with device_map=\"auto\" it may not be plain \"cuda\"\n",
    "\n",
    "    # Generate the answer; no_grad avoids building autograd state during inference.\n",
    "    with torch.no_grad():\n",
    "        generated_ids = model.generate(**inputs, max_new_tokens=128)\n",
    "\n",
    "    # Strip the prompt tokens so only the newly generated tokens remain.\n",
    "    generated_ids_trimmed = [\n",
    "        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)\n",
    "    ]\n",
    "\n",
    "    # Decode the generated token ids back to human-readable text.\n",
    "    output_text = processor.batch_decode(\n",
    "        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False\n",
    "    )\n",
    "\n",
    "    print(output_text)\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"推理过程中发生错误: {e}\")\n",
    "    raise  # re-raise so failures are not silently swallowed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "dc14ec2b-30e4-4189-926b-b3b2b2b078cf",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-12-19T14:18:26.452197Z",
     "iopub.status.busy": "2024-12-19T14:18:26.451874Z",
     "iopub.status.idle": "2024-12-19T14:18:26.510926Z",
     "shell.execute_reply": "2024-12-19T14:18:26.510438Z",
     "shell.execute_reply.started": "2024-12-19T14:18:26.452166Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Rebuild the model inputs from the previously rendered prompt (`text`).\n",
    "image_inputs, video_inputs = process_vision_info(messages)\n",
    "inputs = processor(\n",
    "    text=[text],\n",
    "    images=image_inputs,\n",
    "    videos=video_inputs,\n",
    "    padding=True,\n",
    "    return_tensors=\"pt\",\n",
    ")\n",
    "# Move tensors to the model's device; with device_map=\"auto\" the model may be\n",
    "# partially CPU-offloaded, so do not hardcode \"cuda\".\n",
    "inputs = inputs.to(model.device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "ebf2c19c-c6ef-42fe-b929-d58e328ae4b2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-12-19T14:18:27.295709Z",
     "iopub.status.busy": "2024-12-19T14:18:27.295393Z",
     "iopub.status.idle": "2024-12-19T14:18:37.819951Z",
     "shell.execute_reply": "2024-12-19T14:18:37.819487Z",
     "shell.execute_reply.started": "2024-12-19T14:18:27.295691Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['实物拍摄(含售后)']\n"
     ]
    }
   ],
   "source": [
    "# Generate, then drop the prompt prefix from each sequence before decoding.\n",
    "generated_ids = model.generate(**inputs, max_new_tokens=128)\n",
    "prompt_lengths = [len(ids) for ids in inputs.input_ids]\n",
    "generated_ids_trimmed = [\n",
    "    seq[n_prompt:] for n_prompt, seq in zip(prompt_lengths, generated_ids)\n",
    "]\n",
    "output_text = processor.batch_decode(\n",
    "    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False\n",
    ")\n",
    "print(output_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "a2bc375e-842b-4e1a-8ea2-264901e2be34",
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2024-12-19T14:07:12.373733Z",
     "iopub.status.busy": "2024-12-19T14:07:12.373401Z",
     "iopub.status.idle": "2024-12-19T14:07:12.376620Z",
     "shell.execute_reply": "2024-12-19T14:07:12.375968Z",
     "shell.execute_reply.started": "2024-12-19T14:07:12.373712Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Optional configuration examples, kept for reference. Note: the original\n",
    "# snippets referenced `model_dir`, which is never defined in this notebook;\n",
    "# they now use `model_directory` so they work if uncommented.\n",
    "\n",
    "# We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.\n",
    "# model = Qwen2VLForConditionalGeneration.from_pretrained(\n",
    "#     model_directory,\n",
    "#     torch_dtype=torch.bfloat16,\n",
    "#     attn_implementation=\"flash_attention_2\",\n",
    "#     device_map=\"auto\",\n",
    "# )\n",
    "\n",
    "# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.\n",
    "# min_pixels = 256*28*28\n",
    "# max_pixels = 1280*28*28\n",
    "# processor = AutoProcessor.from_pretrained(model_directory, min_pixels=min_pixels, max_pixels=max_pixels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f4477977-44c0-4006-8faf-43d2b44eb839",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
