{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "420dd72e-2a06-44bf-8710-28bc19d6e898",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from openai import OpenAI\n",
    "\n",
    "# Proxy endpoint for mainland-China access; for the official OpenAI API,\n",
    "# set OPENAI_BASE_URL=https://api.openai.com/v1 (or omit base_url entirely).\n",
    "# SECURITY: never hardcode API keys in a notebook -- read them from the environment.\n",
    "client = OpenAI(\n",
    "    api_key=os.environ[\"OPENAI_API_KEY\"],\n",
    "    base_url=os.getenv(\"OPENAI_BASE_URL\", \"https://vip.apiyi.com/v1\")\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "c97c3158-454f-440f-83bc-67a7414a3c2e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='这幅图展示了一条木板铺成的小路，通向远方，穿过一片广阔的绿色草地。草地两侧长满了高高的青草和一些灌木，远处可以看到几棵树。天空晴朗，飘着几朵细长的白云，呈现出一种宁静而开阔的自然风光。整体画面色彩明亮，给人一种清新、平和的感觉，仿佛走在乡间的小径上，享受大自然的美好与宁静。', role='assistant', function_call=None, tool_calls=None, refusal=None, annotations=[]))\n"
     ]
    }
   ],
   "source": [
    "# Ask the vision model to describe a remote image, passed by public URL.\n",
    "vision_message = {\n",
    "    \"role\": \"user\",\n",
    "    \"content\": [\n",
    "        {\"type\": \"text\", \"text\": \"介绍一下这幅图？\"},\n",
    "        {\n",
    "            \"type\": \"image_url\",\n",
    "            \"image_url\": {\n",
    "                \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n",
    "            }\n",
    "        }\n",
    "    ]\n",
    "}\n",
    "\n",
    "response = client.chat.completions.create(\n",
    "    model=\"gpt-4.1-mini\",\n",
    "    messages=[vision_message],\n",
    "    max_tokens=300\n",
    ")\n",
    "\n",
    "print(response.choices[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "0e368999-edbd-4d43-9cc4-366d179ba066",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'这幅图展示了一条木板铺成的小路，通向远方，穿过一片广阔的绿色草地。草地两侧长满了高高的青草和一些灌木，远处可以看到几棵树。天空晴朗，飘着几朵细长的白云，呈现出一种宁静而开阔的自然风光。整体画面色彩明亮，给人一种清新、平和的感觉，仿佛走在乡间的小径上，享受大自然的美好与宁静。'"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Bare last expression -> the reply text renders via rich display.\n",
    "reply = response.choices[0].message\n",
    "reply.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "06d57106-6276-41cb-9e16-6b23e4712da5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "ffac7790-d91c-454a-92a9-3a2348316e31",
   "metadata": {},
   "outputs": [],
   "source": [
    "def query_image_description(url, prompt=\"介绍一下这幅图\", max_tokens=300):\n",
    "    \"\"\"Describe a remote image via the chat-completions vision API.\n",
    "\n",
    "    Args:\n",
    "        url: Publicly reachable image URL.\n",
    "        prompt: Text instruction sent alongside the image.\n",
    "        max_tokens: Upper bound on the length of the generated answer.\n",
    "\n",
    "    Returns:\n",
    "        The assistant's reply text (str).\n",
    "    \"\"\"\n",
    "    response = client.chat.completions.create(\n",
    "        model=\"gpt-4.1-mini\",\n",
    "        messages=[\n",
    "            {\n",
    "                \"role\": \"user\",\n",
    "                \"content\": [\n",
    "                    {\"type\": \"text\", \"text\": prompt},\n",
    "                    {\"type\": \"image_url\", \"image_url\": {\"url\": url}}\n",
    "                ]\n",
    "            }\n",
    "        ],\n",
    "        max_tokens=max_tokens\n",
    "    )\n",
    "    return response.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "d25a84a7-2953-4089-ad4b-bddcc4ba24b5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "这幅图采用了两个“柴犬”形象做对比，用幽默的方式表现了人从16岁时的状态到工作后的状态的变化。\n",
      "\n",
      "左边是“16岁的我”，柴犬被画得非常强壮和自信，配有肌肉发达的身体，代表年轻时的朝气和对未来的美好期待。旁边的文字有“我前途一片光明”、“身体素质高”、“未来可期”、“八九点钟的太阳”，表达了年轻时精力充沛，自信满满。\n",
      "\n",
      "右边是“工作后的我”，柴犬显得疲惫、无精打采，神情很无力，旁边文字写了“好累好困，好想睡懒觉”、“重大伤病不要靠近我啊啊啊”、“我好弱小啊，啊啊啊啊啊”、“我就是普通人”，表现了工作后因压力和劳累而变得疲惫、平凡，没有了年轻时的激情和活力。\n",
      "\n",
      "整体上，这幅图用对比手法和夸张的卡通形象，形象地反映了很多人在成长和工作后的生活状态变化，富有幽默感且很有共鸣。\n"
     ]
    }
   ],
   "source": [
    "# Describe a meme image hosted at a remote URL.\n",
    "image_url = \"https://p6.itc.cn/q_70/images03/20200602/0c267a0d3d814c9783659eb956969ba1.jpeg\"\n",
    "content = query_image_description(url=image_url)\n",
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb50e3ae-1c17-430c-9080-4c8518818bfd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "cb466d7b-2c52-4462-9b94-0ce033416e89",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Describe a local image by sending it as a base64-encoded data URL.\n",
    "import base64\n",
    "import requests\n",
    "\n",
    "def query_base64_image_description(image_path, prompt=\"解释一下图中的内容\", max_tokens=1000):\n",
    "    \"\"\"Describe a local image file via the chat-completions vision API.\n",
    "\n",
    "    Uses a raw HTTP request (instead of the SDK) to demonstrate the wire format.\n",
    "\n",
    "    Args:\n",
    "        image_path: Path to a local JPEG image file.\n",
    "        prompt: Text instruction sent alongside the image.\n",
    "        max_tokens: Upper bound on the length of the generated answer.\n",
    "\n",
    "    Returns:\n",
    "        The assistant's reply text on success, otherwise a human-readable\n",
    "        error string (always str, never an exception object).\n",
    "    \"\"\"\n",
    "    def encode_image(path):\n",
    "        # Read the file as raw bytes and return its base64 text form.\n",
    "        with open(path, \"rb\") as image_file:\n",
    "            return base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
    "\n",
    "    base64_image = encode_image(image_path)\n",
    "\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\",\n",
    "        \"Authorization\": f\"Bearer {client.api_key}\"\n",
    "    }\n",
    "\n",
    "    payload = {\n",
    "        \"model\": \"gpt-4.1-mini\",\n",
    "        \"messages\": [\n",
    "            {\n",
    "                \"role\": \"user\",\n",
    "                \"content\": [\n",
    "                    {\"type\": \"text\", \"text\": prompt},\n",
    "                    {\"type\": \"image_url\", \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"}}\n",
    "                ]\n",
    "            }\n",
    "        ],\n",
    "        \"max_tokens\": max_tokens\n",
    "    }\n",
    "\n",
    "    # Reuse the client's configured endpoint instead of hardcoding it a second time.\n",
    "    endpoint = str(client.base_url).rstrip(\"/\") + \"/chat/completions\"\n",
    "\n",
    "    try:\n",
    "        # timeout keeps the notebook from hanging forever on a network stall\n",
    "        response = requests.post(endpoint, headers=headers, json=payload, timeout=60)\n",
    "        if response.status_code == 200:\n",
    "            return response.json()['choices'][0]['message']['content']\n",
    "        return f\"Error: {response.status_code}, {response.text}\"\n",
    "    except requests.RequestException as e:\n",
    "        # Return a string (not the exception object) so the return type is consistent.\n",
    "        return f\"Exception:{e}\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "1370caef-654d-406a-b551-c452d22396e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "这张图表展示了1980年至2020年期间美国、中国、日本和德国四个国家的国内生产总值（GDP，单位为万亿美元）的变化趋势对比。\n",
      "\n",
      "主要内容和特点如下：\n",
      "1. **时间范围**：从1980年到2020年，共40年数据。\n",
      "2. **纵轴（Y轴）**表示GDP，单位是万亿美元（Trillion USD）。\n",
      "3. **横轴（X轴）**表示年份，从1980年至2020年。\n",
      "4. **四条不同颜色的曲线**代表四个国家的GDP变化：\n",
      "   - 蓝色线：美国（USA）\n",
      "   - 红色线：中国（China）\n",
      "   - 紫色线：日本（Japan）\n",
      "   - 绿色线：德国（Germany）\n",
      "\n",
      "5. **趋势分析**：\n",
      "   - 美国的GDP在40年内稳步增长，且总体保持最高水平，2020年达到约21万亿美元。\n",
      "   - 中国的GDP起步较低，1980年不足0.5万亿美元，但增长非常迅速，尤其是2000年以后加速上升，接近15万亿美元，显示中国经济的快速崛起。\n",
      "   - 日本的GDP在1980年代至1990年代快速增长，之后基本进入停滞或缓慢波动，维持在4万亿美元左右。\n",
      "   - 德国的GDP总体保持较稳定且缓慢增长，约在3.5至4万亿美元之间波动。\n",
      "\n",
      "6. **总结**：\n",
      "   - 美国一直是这四国中GDP最高的国家，且持续增长。\n",
      "   - 中国经济增长速度最快，特别是近20年内快速追赶其他发达国家。\n",
      "   - 日本和德国的GDP相对稳定，增长速度较慢。\n",
      "\n",
      "图表清晰体现了全球主要经济体的发展差异和变化趋势。\n"
     ]
    }
   ],
   "source": [
    "# Describe a local chart image (GDP comparison, 1980-2020).\n",
    "content = query_base64_image_description(image_path=\"./images/gdp_1980_2020.jpg\")\n",
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc13b4b8-ba51-443f-8d77-4f158f2ae2e4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "9b59e00f-f02f-4407-b98c-ae1bcab96096",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "这张笔记内容主要是在讲述当下流行的PEFT（Parameter-Efficient Fine-Tuning）方案，详细介绍了几种PEFT方法的基本原理和特点，包括Prompt Tuning、Prefix Tuning、LoRA以及QLoRA，具体内容如下：\n",
      "\n",
      "1. **Prompt Tuning （提示调优）**\n",
      "   - 适用于较小的模型。\n",
      "   - 将输入的序列 \\( X = [x_1, x_2, ..., x_n] \\) 转换为带提示的序列 \\( X' = [x_1', x_2', ..., x_k', x_1, x_2, x_3, ..., x_n] \\)。\n",
      "   - 这些提示词向量 \\( x_i' \\) 作用于Embedding层，通常是作为固定的“前缀token”附加在输入序列前面。\n",
      "   - 模型通过线性变换 \\( Y = W X' \\) 进行处理。\n",
      "\n",
      "2. **Prefix Tuning**\n",
      "   - 也是通过在输入前添加一段学习得到的前缀参数 \\( W_p \\)。\n",
      "   - 新权重 \\( W' = [W_p; W] \\) 是在Transformer的encoder/decoder部分使用的（意思是在Transformer模型的中间层进行调优）。\n",
      "   - 通过 \\( Y = W' X \\) 进行计算，以达到微调效果。\n",
      "   \n",
      "3. **LoRA（低秩适配器）**  \n",
      "   - 主要针对大型模型。\n",
      "   - 微调方式：权重矩阵变为 \\( Y = (W + \\Delta W) X \\)，其中\n",
      "   - \\( \\Delta W = AB \\)，其中A和B是低秩矩阵，将高维参数矩阵 \\( W \\in \\mathbb{R}^{m \\times n} \\) 分解为两个低秩矩阵 \\( A \\in \\mathbb{R}^{m \\times r} \\) 和 \\( B \\in \\mathbb{R}^{r \\times n} \\)（r是秩，远小于m和n）。\n",
      "   - 目的是用较少参数适配大模型，实现针对特定数据和下游任务的微调，节省存储和计算资源。\n",
      "   - 也提到一种切换场景的应用，操作形式为 \\((W + \\Delta W) - \\Delta W + \\Delta W'\\)，表示可以灵活切换不用的适配权重。\n",
      "\n",
      "4. **QLoRA（量化LoRA，降低显存）**\n",
      "   - 结合LoRA和量化技术，降低显存占用。\n",
      "   - 例子中给出了LLaMA-65B模型的存储需求：\n",
      "     - 原始LoRA大约需要780GB显存。\n",
      "     - QLoRA调优后显存需求降至48GB。\n",
      "   \n",
      "综上，这些方案都是当前微调大型预训练模型时，为了减少训练参数量和计算资源而提出的技术，重点介绍了它们不同的参数表示和调优方式。\n"
     ]
    }
   ],
   "source": [
    "# Recognize and summarize a handwritten note.\n",
    "content = query_base64_image_description(image_path=\"./images/handwriting_0.jpg\")\n",
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b695060d-99ae-450c-9924-03eca1d5f69c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "903497f6-b37c-4bc4-8409-b083e4264863",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "这张笔记内容主要是在讲述当下流行的PEFT（Parameter-Efficient Fine-Tuning）方案，详细介绍了几种PEFT方法的基本原理和特点，包括Prompt Tuning、Prefix Tuning、LoRA以及QLoRA，具体内容如下：\n",
       "\n",
       "1. **Prompt Tuning （提示调优）**\n",
       "   - 适用于较小的模型。\n",
       "   - 将输入的序列 \\( X = [x_1, x_2, ..., x_n] \\) 转换为带提示的序列 \\( X' = [x_1', x_2', ..., x_k', x_1, x_2, x_3, ..., x_n] \\)。\n",
       "   - 这些提示词向量 \\( x_i' \\) 作用于Embedding层，通常是作为固定的“前缀token”附加在输入序列前面。\n",
       "   - 模型通过线性变换 \\( Y = W X' \\) 进行处理。\n",
       "\n",
       "2. **Prefix Tuning**\n",
       "   - 也是通过在输入前添加一段学习得到的前缀参数 \\( W_p \\)。\n",
       "   - 新权重 \\( W' = [W_p; W] \\) 是在Transformer的encoder/decoder部分使用的（意思是在Transformer模型的中间层进行调优）。\n",
       "   - 通过 \\( Y = W' X \\) 进行计算，以达到微调效果。\n",
       "   \n",
       "3. **LoRA（低秩适配器）**  \n",
       "   - 主要针对大型模型。\n",
       "   - 微调方式：权重矩阵变为 \\( Y = (W + \\Delta W) X \\)，其中\n",
       "   - \\( \\Delta W = AB \\)，其中A和B是低秩矩阵，将高维参数矩阵 \\( W \\in \\mathbb{R}^{m \\times n} \\) 分解为两个低秩矩阵 \\( A \\in \\mathbb{R}^{m \\times r} \\) 和 \\( B \\in \\mathbb{R}^{r \\times n} \\)（r是秩，远小于m和n）。\n",
       "   - 目的是用较少参数适配大模型，实现针对特定数据和下游任务的微调，节省存储和计算资源。\n",
       "   - 也提到一种切换场景的应用，操作形式为 \\((W + \\Delta W) - \\Delta W + \\Delta W'\\)，表示可以灵活切换不用的适配权重。\n",
       "\n",
       "4. **QLoRA（量化LoRA，降低显存）**\n",
       "   - 结合LoRA和量化技术，降低显存占用。\n",
       "   - 例子中给出了LLaMA-65B模型的存储需求：\n",
       "     - 原始LoRA大约需要780GB显存。\n",
       "     - QLoRA调优后显存需求降至48GB。\n",
       "   \n",
       "综上，这些方案都是当前微调大型预训练模型时，为了减少训练参数量和计算资源而提出的技术，重点介绍了它们不同的参数表示和调优方式。"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Render the model's reply as Markdown instead of plain text.\n",
    "from IPython.display import Markdown, display\n",
    "\n",
    "display(Markdown(content))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "01e49cd6-f2cc-4af4-8f54-6ca0e20aca65",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "这张图展示的是关于“框架与工具”以及“参数高效微调（PEFT）”的学习笔记，内容涉及多种微调方法、模型及其发展，重点关注Transformer模型和大语言模型（LLMs）的参数高效微调技术。以下是详细说明：\n",
       "\n",
       "---\n",
       "\n",
       "### 左页内容：\n",
       "\n",
       "#### 框架与工具\n",
       "- 主要方向是基于 **Transformer** 架构的研究，包括：\n",
       "  - 模型（模型结构）\n",
       "  - 数据\n",
       "  - Benchmark（评测基准）\n",
       "\n",
       "#### 参数高效微调（PEFT）\n",
       "- **PEFT** 相关：\n",
       "  - **SOTA**（state-of-the-art）方法\n",
       "  - PEFT的各种方法介绍\n",
       "\n",
       "#### PEFT具体方法：Prompt Tuning（提示调优）\n",
       "- Adapter（2019，Google）：早期适配器模块方法\n",
       "- Prefix（2021，Stanford）：前缀调优\n",
       "- Prompt（2021，Google）：提示工程调优\n",
       "- P-tuning V1（2021）\n",
       "- P-tuning V2（2022）\n",
       "- Soft Prompts（2021）：软提示\n",
       "- Hard Prompts（template-based 模板提示）\n",
       "- Instruction tuning（指令微调）针对大语言模型（LLMs）\n",
       "\n",
       "#### 主要大语言模型（LLMs）\n",
       "- FLAN (Google，2021.09)\n",
       "- OpenAI InstructGPT\n",
       "- BLOOM\n",
       "- ALPACA\n",
       "- 其他：Claude、ChatGLM2、MOSS\n",
       "\n",
       "---\n",
       "\n",
       "### 右页内容：\n",
       "\n",
       "#### 多模态 指令微调 多模态大语言模型\n",
       "- **Multi-modality Instruction Fine-tuning LLMs**\n",
       "- 例子：LLaVA（13B参数模型）\n",
       "\n",
       "#### LoRA 及其变种\n",
       "- LoRA（低秩适配）\n",
       "- QLoRA\n",
       "- AdaLoRA\n",
       "\n",
       "#### 新的PEFT方法\n",
       "- PETL（2022）\n",
       "- IA3\n",
       "\n",
       "#### Prefix-tuning 和 Adapters\n",
       "- 说明两者从公式上高度相似\n",
       "- 在大语言模型中 refine Δh（某种变化或者差分）\n",
       "- Δh涉及投影方式（Project-down → non-linear → Project-up）\n",
       "- 插入型（Insertion form）：添加类型的调整\n",
       "- 修改型表示（modified representation）：在表示层添加结构\n",
       "- 组合函数（composition func）：Δh + h = ？（组合方式有待确定）\n",
       "\n",
       "#### MAM Adapters\n",
       "- 多模态适配器概念:\n",
       "  - Scaled parallel adapter → IFN（某种新方法或机制）\n",
       "  - 针对head attention最小化影响\n",
       "  - Soft prompt 策略提升效果\n",
       "\n",
       "---\n",
       "\n",
       "### 总结：\n",
       "这张笔记系统总结了参数高效微调的相关方法和技术，内容涵盖：\n",
       "- 各种PEFT算法及其发展历程\n",
       "- 具体实现思路（如插入、修改、组合方式）\n",
       "- 重点应用在大语言模型的指令微调\n",
       "- 新兴的多模态模型适配技术及其不同变体\n",
       "\n",
       "这类笔记对于学习微调大型预训练模型非常有价值，结合了理论、方法及各家做法的引用。"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Recognize a second handwritten page and render the summary as Markdown.\n",
    "content = query_base64_image_description(image_path=\"./images/handwriting_1.jpg\")\n",
    "display(Markdown(content))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f341b2-ff2c-44f0-8395-9cf0a421ca5c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "langChain",
   "language": "python",
   "name": "langchain"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
