{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "bca408c8-3e0f-422b-99bf-8c6868479f9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Prompt: List five ice cream flavors.\n",
      "Your response should be a list of comma separated values, eg: `foo, bar, baz` or `foo,bar,baz`\n",
      "Generated Text: vanilla, chocolate, strawberry, mint chocolate chip, cookies and cream\n",
      "Parsed Output: ['vanilla', 'chocolate', 'strawberry', 'mint chocolate chip', 'cookies and cream']\n"
     ]
    }
   ],
   "source": [
    "from langchain.output_parsers import CommaSeparatedListOutputParser\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.schema import HumanMessage, SystemMessage, BaseMessage, ChatResult, ChatGeneration\n",
    "from langchain.chat_models.base import BaseChatModel\n",
    "from typing import List, Dict, Any, Optional\n",
    "import requests\n",
    "from pydantic import Field\n",
    "\n",
    "class ChatLaozhang(BaseChatModel):\n",
    "    api_key: str = Field(..., description=\"老张API的访问密钥\")\n",
    "    model: str = Field(default=\"gpt-4o-mini\", description=\"使用的模型名称\")\n",
    "    temperature: float = Field(default=1.0, description=\"生成文本的随机性\")\n",
    "    max_tokens: int = Field(default=2048, description=\"最大生成token数\")\n",
    "    api_url: str = \"https://api.laozhang.ai/v1/chat/completions\"\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        \"\"\"返回LLM类型标识符\"\"\"\n",
    "        return \"laozhang-chat\"\n",
    "\n",
    "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
    "        formatted_messages = [\n",
    "            {\"role\": self._get_role(msg), \"content\": msg.content}\n",
    "            for msg in messages\n",
    "        ]\n",
    "        headers = {\n",
    "            \"Authorization\": f\"Bearer {self.api_key}\",\n",
    "            \"Content-Type\": \"application/json\"\n",
    "        }\n",
    "        data = {\n",
    "            \"model\": self.model,\n",
    "            \"messages\": formatted_messages,\n",
    "            \"temperature\": self.temperature,\n",
    "            \"max_tokens\": self.max_tokens,\n",
    "            **kwargs\n",
    "        }\n",
    "        response = requests.post(self.api_url, headers=headers, json=data)\n",
     "        if response.status_code == 200:\n",
     "            content = response.json()[\"choices\"][0][\"message\"][\"content\"]\n",
     "            # NOTE(review): the model's reply would more correctly be an\n",
     "            # AIMessage; HumanMessage is kept here so the isinstance check\n",
     "            # later in this cell continues to match.\n",
     "            generation = ChatGeneration(message=HumanMessage(content=content))\n",
     "            return ChatResult(generations=[generation])\n",
    "        else:\n",
    "            raise Exception(f\"API请求失败: {response.text}\")\n",
    "\n",
    "    def _get_role(self, message: BaseMessage) -> str:\n",
    "        if isinstance(message, HumanMessage):\n",
    "            return \"user\"\n",
    "        elif isinstance(message, SystemMessage):\n",
    "            return \"system\"\n",
    "        else:\n",
    "            return \"assistant\"\n",
    "\n",
     "    def _call(self, messages: List[BaseMessage], **kwargs) -> BaseMessage:\n",
     "        \"\"\"Convenience wrapper: return the reply message directly instead of a ChatResult.\"\"\"\n",
     "        result = self._generate(messages, **kwargs)\n",
     "        return result.generations[0].message\n",
    "\n",
     "# Initialize the LLM. Never hardcode API keys in a notebook —\n",
     "# read the secret from the environment instead.\n",
     "import os\n",
     "llm = ChatLaozhang(\n",
     "    api_key=os.environ[\"LAOZHANG_API_KEY\"],\n",
     "    model=\"gpt-4o-mini\",\n",
     "    temperature=1,\n",
     "    max_tokens=4095\n",
     ")\n",
    "\n",
    "# 创建输出解析器\n",
    "output_parser = CommaSeparatedListOutputParser()\n",
    "format_instructions = output_parser.get_format_instructions()\n",
    "\n",
    "# 创建提示模板\n",
    "prompt = PromptTemplate(\n",
    "    template=\"List five {subject}.\\n{format_instructions}\",\n",
    "    input_variables=[\"subject\"],\n",
    "    partial_variables={\"format_instructions\": format_instructions}\n",
    ")\n",
    "\n",
    "# 格式化输入\n",
    "_input = prompt.format(subject=\"ice cream flavors\")\n",
    "print(\"Prompt:\", _input)\n",
    "\n",
     "# Invoke the LLM (returns a message object).\n",
     "messages = [HumanMessage(content=_input)]\n",
     "output = llm.invoke(messages)  # invoke() replaces the deprecated __call__\n",
     "\n",
     "# Accept any message subclass rather than only HumanMessage —\n",
     "# a chat model's reply is conventionally an AIMessage.\n",
     "if isinstance(output, BaseMessage):\n",
     "    generated_text = output.content\n",
     "    print(\"Generated Text:\", generated_text)\n",
    "    \n",
    "    # 解析输出\n",
    "    try:\n",
    "        parsed_output = output_parser.parse(generated_text)\n",
    "        print(\"Parsed Output:\", parsed_output)\n",
    "    except Exception as e:\n",
    "        print(\"解析失败:\", str(e))\n",
    "        print(\"原始输出:\", generated_text)\n",
    "else:\n",
    "    print(\"Unexpected output type:\", type(output))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "1f2c9d73-6921-4cb4-8d63-f9707f8f8f4a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Prompt: List five ice cream flavors.\n",
      "Your response should be a list of comma separated values, eg: `foo, bar, baz` or `foo,bar,baz`\n",
      "Generated Text: vanilla, chocolate, strawberry, mint chocolate chip, cookies and cream\n",
      "Parsed Output: ['vanilla', 'chocolate', 'strawberry', 'mint chocolate chip', 'cookies and cream']\n"
     ]
    }
   ],
   "source": [
     "from langchain.output_parsers import DatetimeOutputParser\n",
     "from langchain.prompts import PromptTemplate  # explicit import so the cell runs on a fresh kernel\n",
     "from langchain.chains import LLMChain\n",
    "\n",
    "output_parser = DatetimeOutputParser()\n",
    "template = \"\"\"Answer the users question:\n",
    "\n",
    "{question}\n",
    "\n",
    "{format_instructions}\"\"\"\n",
    "\n",
    "prompt = PromptTemplate.from_template(\n",
    "    template,\n",
    "    partial_variables={\"format_instructions\": output_parser.get_format_instructions()},\n",
    ")\n",
    "print(prompt)\n",
    "print(prompt.format(question=\"around when was bitcoin founded?\"))\n",
    "\n",
     "from langchain.llms import OpenAI  # explicit import (was missing — NameError on a fresh kernel)\n",
     "chain = LLMChain(prompt=prompt, llm=OpenAI())\n",
     "output = chain.run(\"around when was bitcoin founded?\")\n",
     "print(output)\n",
     "parsed = output_parser.parse(output)  # parse once, reuse the result\n",
     "print(parsed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "134ca160-baea-408e-9a57-9e76a88254cf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_variables=['question'] partial_variables={'format_instructions': \"Write a datetime string that matches the following pattern: '%Y-%m-%dT%H:%M:%S.%fZ'.\\n\\nExamples: 1379-06-20T08:29:13.455666Z, 822-08-04T01:43:38.475986Z, 350-10-21T10:24:07.082796Z\\n\\nReturn ONLY this string, no other words!\"} template='Answer the users question:\\n\\n{question}\\n\\n{format_instructions}'\n",
      "Answer the users question:\n",
      "\n",
      "around when was bitcoin founded?\n",
      "\n",
      "Write a datetime string that matches the following pattern: '%Y-%m-%dT%H:%M:%S.%fZ'.\n",
      "\n",
      "Examples: 1379-06-20T08:29:13.455666Z, 822-08-04T01:43:38.475986Z, 350-10-21T10:24:07.082796Z\n",
      "\n",
      "Return ONLY this string, no other words!\n",
      "input_variables=['question'] partial_variables={'format_instructions': \"Write a datetime string that matches the following pattern: '%Y-%m-%dT%H:%M:%S.%fZ'.\\n\\nExamples: 982-01-30T15:01:55.890633Z, 1056-11-29T10:00:34.566582Z, 1863-04-13T21:44:59.509224Z\\n\\nReturn ONLY this string, no other words!\"} template='Answer the users question:\\n\\n{question}\\n\\n{format_instructions}'\n",
      "Answer the users question:\n",
      "\n",
      "around when was bitcoin founded?\n",
      "\n",
      "Write a datetime string that matches the following pattern: '%Y-%m-%dT%H:%M:%S.%fZ'.\n",
      "\n",
      "Examples: 982-01-30T15:01:55.890633Z, 1056-11-29T10:00:34.566582Z, 1863-04-13T21:44:59.509224Z\n",
      "\n",
      "Return ONLY this string, no other words!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/hlf_env/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 0.3.0. Use RunnableSequence, e.g., `prompt | llm` instead.\n",
      "  warn_deprecated(\n",
      "/root/miniconda3/envs/hlf_env/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 0.3.0. Use invoke instead.\n",
      "  warn_deprecated(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2009-01-03T18:15:05.000000Z\n",
      "2009-01-03 18:15:05\n"
     ]
    }
   ],
   "source": [
     "from langchain.output_parsers import DatetimeOutputParser\n",
     "from langchain.chains import SimpleSequentialChain\n",
     "from langchain.prompts import PromptTemplate  # explicit import so the cell runs on a fresh kernel\n",
     "from langchain.schema import StrOutputParser, BaseMessage, ChatResult, ChatGeneration, HumanMessage, SystemMessage\n",
     "from langchain.chat_models.base import BaseChatModel\n",
     "from typing import List, Optional\n",
     "from pydantic import Field\n",
     "import requests\n",
     "from langchain.chains import LLMChain\n",
    "\n",
    "\"\"\"LangChain的接口规范：\n",
    "    BaseChatModel 要求 _generate 方法返回 ChatResult 对象\n",
    "    ChatResult 必须包含 generations 列表，每个元素是 ChatGeneration\n",
    "\"\"\"\n",
    "class CustomLaozhangAI(BaseChatModel):\n",
    "    api_key: str = Field(..., description=\"老张API的访问密钥\")\n",
    "    model: str = Field(default=\"gpt-4o-mini\", description=\"使用的模型名称\")\n",
    "    temperature: float = Field(default=1.0, description=\"生成文本的随机性\")\n",
    "    max_tokens: int = Field(default=2048, description=\"最大生成token数\")\n",
    "    api_url: str = \"https://api.laozhang.ai/v1/chat/completions\"\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        \"\"\"返回LLM类型标识符\"\"\"\n",
    "        return \"laozhang-chat\"\n",
    "\n",
    "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
    "        formatted_messages = [\n",
    "            {\"role\": self._get_role(msg), \"content\": msg.content}\n",
    "            for msg in messages\n",
    "        ]\n",
    "        headers = {\n",
    "            \"Authorization\": f\"Bearer {self.api_key}\",\n",
    "            \"Content-Type\": \"application/json\"\n",
    "        }\n",
    "        data = {\n",
    "            \"model\": self.model,\n",
    "            \"messages\": formatted_messages,\n",
    "            \"temperature\": self.temperature,\n",
    "            \"max_tokens\": self.max_tokens,\n",
    "            **kwargs\n",
    "        }\n",
    "        response = requests.post(self.api_url, headers=headers, json=data)\n",
     "        if response.status_code == 200:\n",
     "            content = response.json()[\"choices\"][0][\"message\"][\"content\"]\n",
     "            # Wrap the reply in an AIMessage: the model's output is an\n",
     "            # assistant message, not a human one. Local import keeps this\n",
     "            # edit self-contained within the cell.\n",
     "            from langchain.schema import AIMessage\n",
     "            generation = ChatGeneration(message=AIMessage(content=content))\n",
     "            return ChatResult(generations=[generation])\n",
     "        else:\n",
     "            raise Exception(f\"API请求失败: {response.text}\")\n",
    "\n",
    "    def _get_role(self, message: BaseMessage) -> str:\n",
    "        if isinstance(message, HumanMessage):\n",
    "            return \"user\"\n",
    "        elif isinstance(message, SystemMessage):\n",
    "            return \"system\"\n",
    "        else:\n",
    "            return \"assistant\"\n",
    "\n",
    "output_parser = DatetimeOutputParser()\n",
    "template = \"\"\"Answer the users question:\n",
    "\n",
    "{question}\n",
    "\n",
    "{format_instructions}\"\"\"\n",
    "\n",
    "prompt = PromptTemplate.from_template(\n",
    "    template,\n",
    "    partial_variables={\"format_instructions\": output_parser.get_format_instructions()},\n",
    ")\n",
    "print(prompt)\n",
    "print(prompt.format(question=\"around when was bitcoin founded?\"))\n",
    "\n",
     "# Build the chat model. Never hardcode API keys in a notebook —\n",
     "# read the secret from the environment instead.\n",
     "# llm = OpenAI(temperature=0.7, max_tokens=1000)\n",
     "import os\n",
     "llm = CustomLaozhangAI(\n",
     "    api_key=os.environ[\"LAOZHANG_API_KEY\"],\n",
     "    model=\"gpt-4o-mini\",\n",
     "    temperature=1,\n",
     "    max_tokens=4095\n",
     ")\n",
    "\n",
     "# NOTE: output_parser / template / prompt were re-defined here with code\n",
     "# identical to the definitions above; the duplicates were removed and the\n",
     "# objects built earlier in this cell are reused.\n",
    "\n",
     "chain = LLMChain(llm=llm, prompt=prompt)\n",
     "output = chain.run(\"around when was bitcoin founded?\")\n",
     "print(output)\n",
     "parsed = output_parser.parse(output)  # parse once, reuse the result\n",
     "print(parsed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "4dd90b3c-bea7-4584-a68a-921ce984ed53",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tell me a funny joke about chickens.\n",
      "input_variables=['adjective', 'content'] template='Tell me a {adjective} joke about {content}.'\n",
      "Tell me a joke\n",
      "Tell me a funny joke about chickens.\n",
      "Tell me a funny joke about chickens.\n",
      "prompt: 讲2个给程序员听得笑话\n",
      "result: content='当然可以！以下是两个程序员相关的笑话：\\n\\n1. **代码和酒**  \\n   程序员走进酒吧，点了一杯酒。酒保问：“你想喝什么？”  \\n   程序员回答：“随便，反正我会调试。”  \\n   酒保笑着说：“你能调试酒吗？”  \\n   程序员说：“当然，只要给我足够的时间和一些错误提示就行！”\\n\\n2. **函数的约会**  \\n   一对函数在约会。男函数说：“我爱你！”  \\n   女函数回答：“谢谢，但我只对你的返回值感兴趣。”  \\n   男函数惭愧地说：“那好吧，我们可以改回传值！”  \\n\\n希望你喜欢这些笑话！' id='run-80bd0a5c-e05d-411d-9246-4560c922f257-0'\n",
      "content='当然可以！以下是三个程序员可以理解的笑话：\\n\\n1. **编程语言的幽默**：\\n   有一天，一个程序员和他的朋友在谈论编程语言。朋友问：“你觉得哪种语言最好？”程序员回答：“其实我觉得科幻小说是最好的，因为没有错误处理。”\\n\\n2. **计算机的恋爱**：\\n   有两个程序员在聊天，一个说：“我在找一个女朋友。”另一个问：“你对她有什么要求？”第一个程序员说：“她至少要有 404 个错误。”  \\n   另一个愣了一下问：“404个错误？为什么？”  \\n   第一个程序员回答：“因为那样我就能找到一个完美的她！”\\n\\n3. **代码与浪漫**：\\n   一位程序员向他的爱人求婚：“愿意嫁给我吗？”  \\n   爱人说：“你为什么突然问这个？”  \\n   程序员笑着回答：“因为我看到这段代码运行了没有错误，我觉得时机到了！”\\n\\n希望这些笑话能让你会心一笑！' id='run-92144e4c-d586-4604-87bf-93268b52c781-0'\n",
      "input_variables=['adjective', 'content'] template='Tell me a {{ adjective }} joke about {{ content }}' template_format='jinja2'\n",
      "content='以下是一个简单的快速排序的 C++ 实现代码：\\n\\n```cpp\\n#include <iostream>\\n#include <vector>\\n\\nusing namespace std;\\n\\n// 快速排序函数\\nint partition(vector<int>& arr, int low, int high) {\\n    int pivot = arr[high]; // 选择最后一个元素作为基准\\n    int i = low - 1; // 指向较小元素的索引\\n\\n    for (int j = low; j < high; j++) {\\n        // 如果当前元素小于或等于基准\\n        if (arr[j] <= pivot) {\\n            i++; // 增加较小元素的索引\\n            swap(arr[i], arr[j]); // 交换\\n        }\\n    }\\n    swap(arr[i + 1], arr[high]); // 将基准放置在正确的位置\\n    return i + 1; // 返回基准的索引\\n}\\n\\nvoid quickSort(vector<int>& arr, int low, int high) {\\n    if (low < high) {\\n        // 找到划分的索引\\n        int pi = partition(arr, low, high);\\n\\n        // 递归排序划分前后的子数组\\n        quickSort(arr, low, pi - 1);\\n        quickSort(arr, pi + 1, high);\\n    }\\n}\\n\\nint main() {\\n    vector<int> arr = {10, 7, 8, 9, 1, 5};\\n    int n = arr.size();\\n\\n    quickSort(arr, 0, n - 1);\\n\\n    cout << \"排序后的数组: \";\\n    for (int i = 0; i < n; i++)\\n        cout << arr[i] << \" \";\\n    cout << endl;\\n\\n    return 0;\\n}\\n```\\n\\n### 代码说明\\n1. **`partition` 函数**: 该函数负责选择一个基准，并将小于基准的元素移到左边，大于基准的元素移到右边，最后返回基准的位置。\\n2. **`quickSort` 函数**: 该函数使用递归的方法对数组进行排序。它调用 `partition` 来获得基准元素的索引，并继续对基准元素的左侧和右侧进行排序。\\n3. **`main` 函数**: 在主函数中定义了一个数组，并调用 `quickSort` 函数，然后打印排序后的结果。\\n\\n### 编译及运行\\n将上述代码保存为一个 `.cpp` 文件，例如 `quick_sort.cpp`，并使用以下命令编译和运行：\\n\\n```bash\\ng++ -o quick_sort quick_sort.cpp\\n./quick_sort\\n```\\n\\n你将看到排序后的数组输出。' id='run-91de33be-6879-4ebb-8839-896de0784462-0'\n",
      "content='My name is Bob! How can I assist you today?' id='run-532a22a0-2437-434f-ae52-43d745b956b7-0'\n",
      "1. [PHP是世界上最好的语言]\n",
      "PHP是世界上最好的情感派编程语言，无需逻辑和算法，只要情绪。它能被蛰伏在冰箱里的PHP大神轻易驾驭，会话结束后的感叹号也能传达对代码的热情。写PHP就像是在做披萨，不需要想那么多，只需把配料全部扔进一个碗，然后放到服务器上，热乎乎出炉的网页就好了。\n",
      "-----------\n",
      "2. [Python是世界上最好的语言]\n",
      "Python是世界上最好的拜金主义者语言。它坚信：美丽就是力量，简洁就是灵魂。Python就像是那个永远在你皱眉的那一刻扔给你言情小说的好友。只有Python，你才能够在两行代码之间感受到飘逸的花香和清新的微风。记住，这世上只有一种语言可以使用空格来领导全世界的进步，那就是Python。\n",
      "-----------\n",
      "3. [Java是世界上最好的语言]\n",
      "Java是世界上最好的德育课编程语言，它始终坚守了严谨、安全的编程信条。Java就像一个严格的老师，他不会对你怀柔，不会让你偷懒，也不会让你走捷径，但他教会你规范和自律。Java就像是那个喝咖啡也算加班费的上司，拥有对邪恶的深度厌恶和对善良的深度拥护。\n",
      "\n",
      "content='1. **PHP是世界上最好的语言**  \\n   这篇文章将PHP描述为一种情感驱动的编程语言，强调无需太多逻辑和技术细节，只需表达情感即可。作者使用了比喻，表示写PHP就像制作披萨，简单而直观，强调了其易用性和乐趣。\\n\\n2. **Python是世界上最好的语言**  \\n   文章称Python是一种追求美丽和简洁的语言，凸显其优雅的语法和高效的表达能力。作者通过比喻和形象的描述，传达Python的吸引力，强调其便捷和强大的特性，能够使编码变得轻松愉快。\\n\\n3. **Java是世界上最好的语言**  \\n   此文将Java作为一门严谨且安全的编程语言，强调其规范性和教育意义。作者用比喻将Java比作严格的老师，传达出学习Java需要遵循规则和纪律，尽管不那么灵活，但能够培养开发者的自律和规范意识。\\n\\n**最佳论点**  \\n在这三篇文章中，**Python是世界上最好的语言**的论点更为出色。原因在于其谈及的美丽与简洁不仅吸引了初学者，也反映了Python在实际应用中广泛的魅力和强大的功能。相比之下，虽然PHP和Java分别强调了情感和规范的重要性，但它们的论点缺乏Python所具备的灵活性和实用性的广泛认同。Python能够实现高效的编码，吸引了大量开发者，这使得其论点在现代编程环境中更加受欢迎。' id='run-ad5ab59a-e506-4df1-afdc-9787c899f4ad-0'\n",
      "[SystemMessage(content='你将获得关于同一主题的2篇文章（用-----------标签分隔）。首先总结每篇文章的论点。然后指出哪篇文章提出了更好的论点，并解释原因。'), HumanMessage(content='1.认为“道可道”中的第一个“道”，指的是道理，如仁义礼智之类；“可道”中的“道”，指言说的意思；“常道”，指恒久存在的“道”。因此，所谓“道可道，非常道”，指的是可以言说的道理，不是恒久存在的“道”，恒久存在的“道”不可言说。如苏辙说：“莫非道也。而可道者不可常，惟不可道，而后可常耳。今夫仁义礼智，此道之可道者也。然而仁不可以为义，而礼不可以为智，可道之不可常如此。……而道常不变，不可道之能常如此。”蒋锡昌说：“此道为世人所习称之道，即今人所谓‘道理’也，第一‘道’字应从是解。《广雅·释诂》二：‘道，说也’，第二‘道’字应从是解。‘常’乃真常不易之义，在文法上为区别词。……第三‘道’字即二十五章‘道法自然’之‘道’，……乃老子学说之总名也”。陈鼓应说：“第一个‘道’字是人们习称之道，即今人所谓‘道理’。第二个‘道’字，是指言说的意思。第三个‘道’字，是老子哲学上的专有名词，在本章它意指构成宇宙的实体与动力。……‘常道’之‘常’，为真常、永恒之意。……可以用言词表达的道，就不是常道”。\\n-----------\\n2.认为“道可道”中的第一个“道”，指的是宇宙万物的本原；“可道”中的“道”，指言说的意思；“常道”，指恒久存在的“道”。因此，“道可道，非常道”，指可以言说的“道”，就不是恒久存在的“道”。如张默生说：“‘道’，指宇宙的本体而言。……‘常’，是经常不变的意思。……可以说出来的道，便不是经常不变的道”。董平说：“第一个‘道’字与‘可道’之‘道’，内涵并不相同。第一个‘道’字，是老子所揭示的作为宇宙本根之‘道’；‘可道’之‘道’，则是‘言说’的意思。……这里的大意就是说：凡一切可以言说之‘道’，都不是‘常道’或永恒之‘道’”。汤漳平等说：“第一句中的三个‘道’，第一、三均指形上之‘道’，中间的‘道’作动词，为可言之义。……道可知而可行，但非恒久不变之道”。\\n--------\\n3.认为“道可道”中的第一个“道”，指的是宇宙万物的本原；“可道”中的“道”，指言说的意思；“常道”，则指的是平常人所讲之道、常俗之道。因此，“道可道，非常道”，指“道”是可以言说的，但它不是平常人所谓的道或常俗之道。如李荣说：“道者，虚极之理也。夫论虚极之理，不可以有无分其象，不可以上下格其真。……圣人欲坦兹玄路，开以教门，借圆通之名，目虚极之理，以理可名，称之可道。故曰‘吾不知其名，字之曰道’。非常道者，非是人间常俗之道也。人间常俗之道，贵之以礼义，尚之以浮华，丧身以成名，忘己而徇利。”司马光说：“世俗之谈道者，皆曰道体微妙，不可名言。老子以为不然，曰道亦可言道耳，然非常人之所谓道也。……常人之所谓道者，凝滞于物。”裘锡圭说：“到目前为止，可以说，几乎从战国开始，大家都把‘可道’之‘道’……看成老子所否定的，把‘常道’‘常名’看成老子所肯定的。这种看法其实有它不合理的地方，……‘道’是可以说的。《老子》这个《道经》第一章，开宗明义是要讲他的‘道’。第一个‘道’字，理所应当，也是讲他要讲的‘道’：道是可以言说的。……那么这个‘恒’字应该怎么讲？我认为很简单，‘恒’字在古代作定语用，经常是‘平常’‘恒常’的意思。……‘道’是可以言说的，但是我要讲的这个‘道’，不是‘恒道’，它不是一般人所讲的‘道’。\\n')]\n",
      "content='**文章1总结**：这篇文章认为“道可道”中的第一个“道”是指道理诸如仁义礼智等，第二个“道”表示可言说的意思，而“常道”则指恒久存在的道。作者认为可以言说的道理并不是恒久存在的真道，因此“道可道，非常道”意指我们能理解和言说的道理并不是真正的道，真正的道是不可言说的。引用了苏辙、蒋锡昌和陈鼓应的观点来支持这个论点。\\n\\n**文章2总结**：本篇文章则认为第一个“道”指宇宙万物的本原，第二个“道”同样指可言说的意思，而“常道”则是恒久不变的道。作者认为可以言说的道并不是真正的、永恒的道，强调了世俗对道的理解与老子所指道的差异。文中引用了张默生、董平和汤漳平的观点，以说明不同理解下对“道”的看法。\\n\\n**文章3总结**：这篇文章强调“道可道”中的第一个“道”是指虚极之理，意指可以被言说的道并非平常人所理解的常俗之道。作者认为老子所指的道即为可以言说的，但这并不代表平常人理解的道。引用李荣、司马光和裘锡圭的观点，进一步阐述了道的深刻内涵及与世俗理解之间的差距。\\n\\n**哪篇文章提出了更好的论点**：综合来看，**文章2**提出了更符合逻辑的论点。文章2清晰地界定了“道”的不同含义，以及它们之间的关系，强调了宇宙本体与可言道之间的区别，合理地阐明了“常道”的概念。虽然文章3也提供了有价值的视角，但界定更为模糊，而文章1则更倾向于传统的道理理解，缺乏对道的更深层次的探讨。文章2的论点更符合老子哲学的复杂性，为“道”的讨论提供了更全面的视角。' id='run-615990d6-a3c4-440b-a66c-4717ea7a454a-0'\n"
     ]
    }
   ],
   "source": [
    "from langchain import PromptTemplate\n",
    "from langchain.output_parsers import DatetimeOutputParser\n",
    "from langchain.chains import SimpleSequentialChain\n",
    "from langchain.schema import StrOutputParser, BaseMessage, ChatResult, ChatGeneration, HumanMessage, SystemMessage\n",
    "from langchain.chat_models.base import BaseChatModel\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "from typing import List, Optional\n",
    "from pydantic import Field\n",
    "import requests\n",
    "from langchain.chains import LLMChain\n",
    "\n",
    "\"\"\"LangChain的接口规范：\n",
    "    BaseChatModel 要求 _generate 方法返回 ChatResult 对象\n",
    "    ChatResult 必须包含 generations 列表，每个元素是 ChatGeneration\n",
    "\"\"\"\n",
    "class CustomLaozhangAI(BaseChatModel):\n",
    "    api_key: str = Field(..., description=\"老张API的访问密钥\")\n",
    "    model: str = Field(default=\"gpt-4o-mini\", description=\"使用的模型名称\")\n",
    "    temperature: float = Field(default=1.0, description=\"生成文本的随机性\")\n",
    "    max_tokens: int = Field(default=2048, description=\"最大生成token数\")\n",
    "    api_url: str = \"https://api.laozhang.ai/v1/chat/completions\"\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        \"\"\"返回LLM类型标识符\"\"\"\n",
    "        return \"laozhang-chat\"\n",
    "\n",
    "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
    "        formatted_messages = [\n",
    "            {\"role\": self._get_role(msg), \"content\": msg.content}\n",
    "            for msg in messages\n",
    "        ]\n",
    "        headers = {\n",
    "            \"Authorization\": f\"Bearer {self.api_key}\",\n",
    "            \"Content-Type\": \"application/json\"\n",
    "        }\n",
    "        data = {\n",
    "            \"model\": self.model,\n",
    "            \"messages\": formatted_messages,\n",
    "            \"temperature\": self.temperature,\n",
    "            \"max_tokens\": self.max_tokens,\n",
    "            **kwargs\n",
    "        }\n",
    "        response = requests.post(self.api_url, headers=headers, json=data)\n",
     "        if response.status_code == 200:\n",
     "            content = response.json()[\"choices\"][0][\"message\"][\"content\"]\n",
     "            # Wrap the reply in an AIMessage: the model's output is an\n",
     "            # assistant message, not a human one. Local import keeps this\n",
     "            # edit self-contained within the cell.\n",
     "            from langchain.schema import AIMessage\n",
     "            generation = ChatGeneration(message=AIMessage(content=content))\n",
     "            return ChatResult(generations=[generation])\n",
     "        else:\n",
     "            raise Exception(f\"API请求失败: {response.text}\")\n",
    "\n",
    "    def _get_role(self, message: BaseMessage) -> str:\n",
    "        if isinstance(message, HumanMessage):\n",
    "            return \"user\"\n",
    "        elif isinstance(message, SystemMessage):\n",
    "            return \"system\"\n",
    "        else:\n",
    "            return \"assistant\"\n",
    "\n",
    "# 使用 from_template 方法实例化 PromptTemplate\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    \"Tell me a {adjective} joke about {content}.\"\n",
    ")\n",
    "\n",
    "# 使用 format 生成提示\n",
    "prompt = prompt_template.format(adjective=\"funny\", content=\"chickens\")\n",
    "print(prompt)\n",
    "print(prompt_template)\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    \"Tell me a joke\"\n",
    ")\n",
    "# 生成提示\n",
    "prompt = prompt_template.format()\n",
    "print(prompt)\n",
    "\n",
     "# NOTE(review): input_variables omits 'content' even though the template\n",
     "# uses it — this appears to rely on the installed LangChain version being\n",
     "# lenient about declared variables; confirm against its validation behavior.\n",
     "invalid_prompt = PromptTemplate(\n",
     "    input_variables=[\"adjective\"],\n",
     "    template=\"Tell me a {adjective} joke about {content}.\"\n",
     ")\n",
    "prompt = invalid_prompt.format(adjective=\"funny\", content=\"chickens\")\n",
    "print(prompt)\n",
    "valid_prompt = PromptTemplate(\n",
    "    input_variables=[\"adjective\", \"content\"],\n",
    "    template=\"Tell me a {adjective} joke about {content}.\"\n",
    ")\n",
    "prompt = valid_prompt.format(adjective=\"funny\", content=\"chickens\")\n",
    "print(prompt)\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    \"讲{num}个给程序员听得笑话\"\n",
    ")\n",
    "\n",
     "# llm = OpenAI(model_name=\"text-davinci-003\", max_tokens=1000)\n",
     "# Never hardcode API keys in a notebook; read the secret from the environment.\n",
     "import os\n",
     "llm = CustomLaozhangAI(\n",
     "    api_key=os.environ[\"LAOZHANG_API_KEY\"],\n",
     "    model=\"gpt-4o-mini\",\n",
     "    temperature=1,\n",
     "    max_tokens=4095\n",
     ")\n",
    "\n",
    "prompt = prompt_template.format(num=2)\n",
    "print(f\"prompt: {prompt}\")\n",
    "# 调用LLM（现在会返回HumanMessage）\n",
    "messages = [HumanMessage(content=prompt)]\n",
    "result = llm(messages)\n",
    "print(f\"result: {result}\")\n",
    "messages = [HumanMessage(content=prompt_template.format(num=3))]\n",
    "print(llm(messages))\n",
    "\n",
     "jinja2_template = \"Tell me a {{ adjective }} joke about {{ content }}\"\n",
     "prompt = PromptTemplate.from_template(jinja2_template, template_format=\"jinja2\")\n",
     "\n",
     "# The formatted string was previously computed and discarded; print it.\n",
     "print(prompt.format(adjective=\"funny\", content=\"chickens\"))\n",
     "print(prompt)\n",
    "sort_prompt_template = PromptTemplate.from_template(\n",
    "    \"生成可执行的快速排序 {programming_language} 代码\"\n",
    ")\n",
    "# messages = [HumanMessage(content=sort_prompt_template.format(programming_language=\"python\"))]\n",
    "# messages = [HumanMessage(content=sort_prompt_template.format(programming_language=\"java\"))]\n",
    "messages = [HumanMessage(content=sort_prompt_template.format(programming_language=\"c++\"))]\n",
    "print(llm(messages))\n",
    "\n",
    "\n",
    "\n",
    "template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"You are a helpful AI bot. Your name is {name}.\"),\n",
    "    (\"human\", \"Hello, how are you doing?\"),\n",
    "    (\"ai\", \"I'm doing well, thanks!\"),\n",
    "    (\"human\", \"{user_input}\"),\n",
    "])\n",
    "\n",
    "# 生成提示\n",
    "messages = template.format_messages(\n",
    "    name=\"Bob\",\n",
    "    user_input=\"What is your name?\"\n",
    ")\n",
    "# print(messages)\n",
    "# print(messages[0].content)\n",
    "# print(messages[-1].content)\n",
    "# 正确做法：提取所有消息的文本内容\n",
    "message_content = \"\\n\".join([msg.content for msg in messages])\n",
    "format_messages = [HumanMessage(content=message_content)]  # 只传递文本内容\n",
    "print(llm(format_messages))\n",
    "\n",
    "summary_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你将获得关于同一主题的{num}篇文章（用-----------标签分隔）。首先总结每篇文章的论点。然后指出哪篇文章提出了更好的论点，并解释原因。\"),\n",
    "    (\"human\", \"{user_input}\"),\n",
    "])\n",
    "messages = summary_template.format_messages(\n",
    "    num=3,\n",
    "    user_input='''1. [PHP是世界上最好的语言]\n",
    "PHP是世界上最好的情感派编程语言，无需逻辑和算法，只要情绪。它能被蛰伏在冰箱里的PHP大神轻易驾驭，会话结束后的感叹号也能传达对代码的热情。写PHP就像是在做披萨，不需要想那么多，只需把配料全部扔进一个碗，然后放到服务器上，热乎乎出炉的网页就好了。\n",
    "-----------\n",
    "2. [Python是世界上最好的语言]\n",
    "Python是世界上最好的拜金主义者语言。它坚信：美丽就是力量，简洁就是灵魂。Python就像是那个永远在你皱眉的那一刻扔给你言情小说的好友。只有Python，你才能够在两行代码之间感受到飘逸的花香和清新的微风。记住，这世上只有一种语言可以使用空格来领导全世界的进步，那就是Python。\n",
    "-----------\n",
    "3. [Java是世界上最好的语言]\n",
    "Java是世界上最好的德育课编程语言，它始终坚守了严谨、安全的编程信条。Java就像一个严格的老师，他不会对你怀柔，不会让你偷懒，也不会让你走捷径，但他教会你规范和自律。Java就像是那个喝咖啡也算加班费的上司，拥有对邪恶的深度厌恶和对善良的深度拥护。\n",
    "'''\n",
    ")\n",
    "print(messages[-1].content)\n",
    "message_content = \"\\n\".join([msg.content for msg in messages])\n",
    "format_messages = [HumanMessage(content=message_content)]  # 只传递文本内容\n",
    "print(llm(format_messages))\n",
    "\n",
    "messages = summary_template.format_messages(\n",
    "    num=2,\n",
    "    user_input='''1.认为“道可道”中的第一个“道”，指的是道理，如仁义礼智之类；“可道”中的“道”，指言说的意思；“常道”，指恒久存在的“道”。因此，所谓“道可道，非常道”，指的是可以言说的道理，不是恒久存在的“道”，恒久存在的“道”不可言说。如苏辙说：“莫非道也。而可道者不可常，惟不可道，而后可常耳。今夫仁义礼智，此道之可道者也。然而仁不可以为义，而礼不可以为智，可道之不可常如此。……而道常不变，不可道之能常如此。”蒋锡昌说：“此道为世人所习称之道，即今人所谓‘道理’也，第一‘道’字应从是解。《广雅·释诂》二：‘道，说也’，第二‘道’字应从是解。‘常’乃真常不易之义，在文法上为区别词。……第三‘道’字即二十五章‘道法自然’之‘道’，……乃老子学说之总名也”。陈鼓应说：“第一个‘道’字是人们习称之道，即今人所谓‘道理’。第二个‘道’字，是指言说的意思。第三个‘道’字，是老子哲学上的专有名词，在本章它意指构成宇宙的实体与动力。……‘常道’之‘常’，为真常、永恒之意。……可以用言词表达的道，就不是常道”。\n",
    "-----------\n",
    "2.认为“道可道”中的第一个“道”，指的是宇宙万物的本原；“可道”中的“道”，指言说的意思；“常道”，指恒久存在的“道”。因此，“道可道，非常道”，指可以言说的“道”，就不是恒久存在的“道”。如张默生说：“‘道’，指宇宙的本体而言。……‘常’，是经常不变的意思。……可以说出来的道，便不是经常不变的道”。董平说：“第一个‘道’字与‘可道’之‘道’，内涵并不相同。第一个‘道’字，是老子所揭示的作为宇宙本根之‘道’；‘可道’之‘道’，则是‘言说’的意思。……这里的大意就是说：凡一切可以言说之‘道’，都不是‘常道’或永恒之‘道’”。汤漳平等说：“第一句中的三个‘道’，第一、三均指形上之‘道’，中间的‘道’作动词，为可言之义。……道可知而可行，但非恒久不变之道”。\n",
    "--------\n",
    "3.认为“道可道”中的第一个“道”，指的是宇宙万物的本原；“可道”中的“道”，指言说的意思；“常道”，则指的是平常人所讲之道、常俗之道。因此，“道可道，非常道”，指“道”是可以言说的，但它不是平常人所谓的道或常俗之道。如李荣说：“道者，虚极之理也。夫论虚极之理，不可以有无分其象，不可以上下格其真。……圣人欲坦兹玄路，开以教门，借圆通之名，目虚极之理，以理可名，称之可道。故曰‘吾不知其名，字之曰道’。非常道者，非是人间常俗之道也。人间常俗之道，贵之以礼义，尚之以浮华，丧身以成名，忘己而徇利。”司马光说：“世俗之谈道者，皆曰道体微妙，不可名言。老子以为不然，曰道亦可言道耳，然非常人之所谓道也。……常人之所谓道者，凝滞于物。”裘锡圭说：“到目前为止，可以说，几乎从战国开始，大家都把‘可道’之‘道’……看成老子所否定的，把‘常道’‘常名’看成老子所肯定的。这种看法其实有它不合理的地方，……‘道’是可以说的。《老子》这个《道经》第一章，开宗明义是要讲他的‘道’。第一个‘道’字，理所应当，也是讲他要讲的‘道’：道是可以言说的。……那么这个‘恒’字应该怎么讲？我认为很简单，‘恒’字在古代作定语用，经常是‘平常’‘恒常’的意思。……‘道’是可以言说的，但是我要讲的这个‘道’，不是‘恒道’，它不是一般人所讲的‘道’。\n",
    "'''\n",
    ")\n",
    "print(messages)\n",
    "message_content = \"\\n\".join([msg.content for msg in messages])\n",
    "format_messages = [HumanMessage(content=message_content)]  # 只传递文本内容\n",
    "print(llm(format_messages))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ca5a051c-1c3f-4632-bc8a-4a6e0d4d4604",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Question: 谁活得更久，穆罕默德·阿里还是艾伦·图灵？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：穆罕默德·阿里去世时多大了？\n",
      "中间答案：穆罕默德·阿里去世时74岁。\n",
      "追问：艾伦·图灵去世时多大了？\n",
      "中间答案：艾伦·图灵去世时41岁。\n",
      "所以最终答案是：穆罕默德·阿里\n",
      "\n",
      "input_variables=['answer', 'question'] template='Question: {question}\\n{answer}'\n",
      "Question: 《大白鲨》和《皇家赌场》的导演是同一个国家的吗？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：谁是《大白鲨》的导演？\n",
      "中间答案：《大白鲨》的导演是Steven Spielberg。\n",
      "追问：Steven Spielberg来自哪里？\n",
      "中间答案：美国。\n",
      "追问：谁是《皇家赌场》的导演？\n",
      "中间答案：《皇家赌场》的导演是Martin Campbell。\n",
      "追问：Martin Campbell来自哪里？\n",
      "中间答案：新西兰。\n",
      "所以最终答案是：不是\n",
      "\n",
      "Question: 谁活得更久，穆罕默德·阿里还是艾伦·图灵？\n",
      "Answer: \n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：穆罕默德·阿里去世时多大了？\n",
      "中间答案：穆罕默德·阿里去世时74岁。\n",
      "追问：艾伦·图灵去世时多大了？\n",
      "中间答案：艾伦·图灵去世时41岁。\n",
      "所以最终答案是：穆罕默德·阿里\n",
      "\n",
      "Question: 谁活得更久，穆罕默德·阿里还是艾伦·图灵？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：穆罕默德·阿里去世时多大了？\n",
      "中间答案：穆罕默德·阿里去世时74岁。\n",
      "追问：艾伦·图灵去世时多大了？\n",
      "中间答案：艾伦·图灵去世时41岁。\n",
      "所以最终答案是：穆罕默德·阿里\n",
      "\n",
      "\n",
      "Question: craigslist的创始人是什么时候出生的？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：谁是craigslist的创始人？\n",
      "中间答案：Craigslist是由Craig Newmark创办的。\n",
      "追问：Craig Newmark是什么时候出生的？\n",
      "中间答案：Craig Newmark出生于1952年12月6日。\n",
      "所以最终答案是：1952年12月6日\n",
      "\n",
      "\n",
      "Question: 乔治·华盛顿的外祖父是谁？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：谁是乔治·华盛顿的母亲？\n",
      "中间答案：乔治·华盛顿的母亲是Mary Ball Washington。\n",
      "追问：Mary Ball Washington的父亲是谁？\n",
      "中间答案：Mary Ball Washington的父亲是Joseph Ball。\n",
      "所以最终答案是：Joseph Ball\n",
      "\n",
      "\n",
      "Question: 《大白鲨》和《皇家赌场》的导演是同一个国家的吗？\n",
      "\n",
      "这里需要进一步的问题吗：是的。\n",
      "追问：谁是《大白鲨》的导演？\n",
      "中间答案：《大白鲨》的导演是Steven Spielberg。\n",
      "追问：Steven Spielberg来自哪里？\n",
      "中间答案：美国。\n",
      "追问：谁是《皇家赌场》的导演？\n",
      "中间答案：《皇家赌场》的导演是Martin Campbell。\n",
      "追问：Martin Campbell来自哪里？\n",
      "中间答案：新西兰。\n",
      "所以最终答案是：不是\n",
      "\n",
      "\n",
      "Question: 玛丽·波尔·华盛顿的父亲是谁?\n"
     ]
    }
   ],
   "source": [
    "\"\"\"Build a few-shot prompt with the FewShotPromptTemplate class.\n",
    "\n",
    "There are usually two ways to construct a few-shot prompt:\n",
    "    pick examples manually from a fixed set of examples;\n",
    "    pick them automatically with an Example Selector.\n",
    "This cell demonstrates manual selection from an example set.\n",
    "\"\"\"\n",
    "from langchain.prompts.prompt import PromptTemplate\n",
    "# Import the FewShotPromptTemplate class\n",
    "from langchain.prompts.few_shot import FewShotPromptTemplate\n",
    "\n",
    "\n",
    "# Hand-picked examples: each maps a question to a worked self-ask reasoning trace.\n",
    "examples = [\n",
    "  {\n",
    "    \"question\": \"谁活得更久，穆罕默德·阿里还是艾伦·图灵？\",\n",
    "    \"answer\": \n",
    "\"\"\"\n",
    "这里需要进一步的问题吗：是的。\n",
    "追问：穆罕默德·阿里去世时多大了？\n",
    "中间答案：穆罕默德·阿里去世时74岁。\n",
    "追问：艾伦·图灵去世时多大了？\n",
    "中间答案：艾伦·图灵去世时41岁。\n",
    "所以最终答案是：穆罕默德·阿里\n",
    "\"\"\"\n",
    "  },\n",
    "  {\n",
    "    \"question\": \"craigslist的创始人是什么时候出生的？\",\n",
    "    \"answer\": \n",
    "\"\"\"\n",
    "这里需要进一步的问题吗：是的。\n",
    "追问：谁是craigslist的创始人？\n",
    "中间答案：Craigslist是由Craig Newmark创办的。\n",
    "追问：Craig Newmark是什么时候出生的？\n",
    "中间答案：Craig Newmark出生于1952年12月6日。\n",
    "所以最终答案是：1952年12月6日\n",
    "\"\"\"\n",
    "  },\n",
    "  {\n",
    "    \"question\": \"乔治·华盛顿的外祖父是谁？\",\n",
    "    \"answer\":\n",
    "\"\"\"\n",
    "这里需要进一步的问题吗：是的。\n",
    "追问：谁是乔治·华盛顿的母亲？\n",
    "中间答案：乔治·华盛顿的母亲是Mary Ball Washington。\n",
    "追问：Mary Ball Washington的父亲是谁？\n",
    "中间答案：Mary Ball Washington的父亲是Joseph Ball。\n",
    "所以最终答案是：Joseph Ball\n",
    "\"\"\"\n",
    "  },\n",
    "  {\n",
    "    \"question\": \"《大白鲨》和《皇家赌场》的导演是同一个国家的吗？\",\n",
    "    \"answer\":\n",
    "\"\"\"\n",
    "这里需要进一步的问题吗：是的。\n",
    "追问：谁是《大白鲨》的导演？\n",
    "中间答案：《大白鲨》的导演是Steven Spielberg。\n",
    "追问：Steven Spielberg来自哪里？\n",
    "中间答案：美国。\n",
    "追问：谁是《皇家赌场》的导演？\n",
    "中间答案：《皇家赌场》的导演是Martin Campbell。\n",
    "追问：Martin Campbell来自哪里？\n",
    "中间答案：新西兰。\n",
    "所以最终答案是：不是\n",
    "\"\"\"\n",
    "  }\n",
    "]\n",
    "example_prompt = PromptTemplate(\n",
    "    input_variables=[\"question\", \"answer\"],\n",
    "    template=\"Question: {question}\\n{answer}\"\n",
    ")\n",
    "\n",
    "# **examples[0] unpacks the dict's key/value pairs (question, answer) and passes them to format() as keyword arguments\n",
    "print(example_prompt.format(**examples[0]))\n",
    "print(example_prompt)\n",
    "print(example_prompt.format(**examples[-1]))\n",
    "# A small demonstration of dict unpacking\n",
    "def print_info(question, answer):\n",
    "    print(f\"Question: {question}\")\n",
    "    print(f\"Answer: {answer}\")\n",
    "\n",
    "print_info(**examples[0]) \n",
    "\n",
    "# Generate the few-shot prompt\n",
    "# Create a FewShotPromptTemplate object\n",
    "few_shot_prompt = FewShotPromptTemplate(\n",
    "    examples=examples,           # the examples defined above serve as demonstrations\n",
    "    example_prompt=example_prompt, # template used to render each example\n",
    "    suffix=\"Question: {input}\",    # suffix template; {input} is replaced by the actual input\n",
    "    input_variables=[\"input\"]     # names of the template's input variables\n",
    ")\n",
    "\n",
    "# Format the prompt with the given input and print the result\n",
    "# Here {input} is replaced with \"玛丽·波尔·华盛顿的父亲是谁?\"\n",
    "print(few_shot_prompt.format(input=\"玛丽·波尔·华盛顿的父亲是谁?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d8747a5e-1c43-4215-860f-4989a7156946",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当前输入的是一种感受，所以应该选择 happy/sad 的示例\n",
      "Give the antonym of every input\n",
      "\n",
      "Input: happy\n",
      "Output: sad\n",
      "\n",
      "Input: worried\n",
      "Output:\n",
      "当前输入是一种度量，所以应该选择 tall/short的示例\n",
      "Give the antonym of every input\n",
      "\n",
      "Input: tall\n",
      "Output: short\n",
      "\n",
      "Input: long\n",
      "Output:\n",
      "当前输入是一种天气状况，所以应该选择 windy/calm的示例\n",
      "Give the antonym of every input\n",
      "\n",
      "Input: windy\n",
      "Output: calm\n",
      "\n",
      "Input: rain\n",
      "Output:\n"
     ]
    }
   ],
   "source": [
    "\"\"\"示例选择器 Example Selectors¶\n",
    "如果你有大量的参考示例，就得选择哪些要包含在提示中。最好还是根据某种条件或者规则来自动选择，Example Selector 是负责这个任务的类\n",
    "BaseExampleSelector 定义如下：\n",
    "\n",
    "class BaseExampleSelector(ABC):\n",
    "    # 用于选择包含在提示中的示例的接口。\n",
    "    @abstractmethod\n",
    "    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:\n",
    "        # 根据输入选择要使用的示例。\n",
    "ABC 是 Python 中的 abc 模块中的一个缩写，它表示 \"Abstract Base Class\"（抽象基类）。在 Python 中，抽象基类用于定义其他类必须遵循的基本接口或蓝图，但不能直接实例化。其主要目的是为了提供一种形式化的方式来定义和检查子类的接口。\n",
    "\n",
    "使用抽象基类的几点关键信息：\n",
    "\n",
    "抽象方法：在抽象基类中，你可以定义抽象方法，它没有实现（也就是说，它没有方法体）。任何继承该抽象基类的子类都必须提供这些抽象方法的实现。\n",
    "\n",
    "不能直接实例化：你不能直接创建抽象基类的实例。试图这样做会引发错误。它们的主要目的是为了被继承，并在子类中实现其方法。\n",
    "\n",
    "强制子类实现：如果子类没有实现所有的抽象方法，那么试图实例化该子类也会引发错误。这确保了继承抽象基类的所有子类都遵循了预定的接口。\n",
    "\"\"\"\n",
    "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
    "from langchain.vectorstores import Chroma\n",
    "# from langchain.embeddings import OpenAIEmbeddings\n",
    "from typing import List, Dict, Optional\n",
    "import requests\n",
    "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
    "\n",
    "class LaozhangEmbeddings:\n",
    "    \"\"\"Minimal embeddings client for the laozhang.ai OpenAI-compatible API.\n",
    "\n",
    "    Duck-types the LangChain embeddings interface (embed_documents /\n",
    "    embed_query) so it can be passed to SemanticSimilarityExampleSelector.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, \n",
    "                 api_key: str,\n",
    "                 model: str = \"text-embedding-ada-002\",\n",
    "                 base_url: str = \"https://api.laozhang.ai/v1\"):\n",
    "        \n",
    "        self.api_key = api_key\n",
    "        self.model = model\n",
    "        self.base_url = base_url\n",
    "        \n",
    "    def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
    "        \"\"\"Embed a batch of documents.\n",
    "\n",
    "        Args:\n",
    "            texts: strings to embed; an empty list short-circuits to [].\n",
    "\n",
    "        Returns:\n",
    "            One embedding vector (list of floats) per input text.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: when the API replies with a non-200 status.\n",
    "            Exception: when the HTTP request itself fails.\n",
    "        \"\"\"\n",
    "        if not texts:\n",
    "            return []\n",
    "            \n",
    "        headers = {\n",
    "            \"Authorization\": f\"Bearer {self.api_key}\",\n",
    "            \"Content-Type\": \"application/json\"\n",
    "        }\n",
    "        data = {\n",
    "            \"model\": self.model,\n",
    "            \"input\": texts\n",
    "        }\n",
    "        \n",
    "        try:\n",
    "            response = requests.post(\n",
    "                f\"{self.base_url}/embeddings\",\n",
    "                headers=headers,\n",
    "                json=data,\n",
    "                timeout=15\n",
    "            )\n",
    "            \n",
    "            # Surface API error responses with the server-provided message\n",
    "            if response.status_code != 200:\n",
    "                error_info = response.json().get(\"error\", {})\n",
    "                error_msg = error_info.get(\"message\", \"未知错误\")\n",
    "                raise ValueError(f\"API错误({response.status_code}): {error_msg}\")\n",
    "                \n",
    "            return [item['embedding'] for item in response.json()['data']]\n",
    "            \n",
    "        except requests.exceptions.RequestException as e:\n",
    "            raise Exception(f\"API请求失败: {str(e)}\")\n",
    "\n",
    "    def embed_query(self, text: str) -> List[float]:\n",
    "        \"\"\"Embed a single query string (delegates to embed_documents).\"\"\"\n",
    "        return self.embed_documents([text])[0]\n",
    "\n",
    "import os\n",
    "\n",
    "# Initialize the embedding model.\n",
    "# Read the API key from the environment instead of hardcoding it: a key embedded\n",
    "# in a committed notebook is a leaked secret and must be rotated.\n",
    "embeddings_model = LaozhangEmbeddings(\n",
    "    api_key=os.environ.get(\"LAOZHANG_API_KEY\", \"\")  # export LAOZHANG_API_KEY before running\n",
    ")\n",
    "\n",
    "# Example: embed several documents (uncomment to try; performs a network call)\n",
    "# embeddings = embeddings_model.embed_documents([\n",
    "#     \"Hi there!\",\n",
    "#     \"Oh, hello!\",\n",
    "#     \"What's your name?\",\n",
    "#     \"My friends call me World\",\n",
    "#     \"Hello World!\"\n",
    "# ])\n",
    "# print(f\"嵌入数量: {len(embeddings)}\")\n",
    "# print(f\"每个嵌入的维度: {len(embeddings[0])}\")\n",
    "\n",
    "# # Example: embed a single query\n",
    "# query = \"What was the name mentioned in the conversation?\"\n",
    "# query_embedding = embeddings_model.embed_query(query)\n",
    "# print(f\"查询嵌入维度: {len(query_embedding)}\")\n",
    "\n",
    "# Define the prompt template used to render each selected example\n",
    "example_prompt = PromptTemplate(\n",
    "    input_variables=[\"input\", \"output\"],     # names of the input variables\n",
    "    template=\"Input: {input}\\nOutput: {output}\",  # the actual template string\n",
    ")\n",
    "\n",
    "# A toy example list for an antonym-generation task\n",
    "examples = [\n",
    "    {\"input\": \"happy\", \"output\": \"sad\"},\n",
    "    {\"input\": \"tall\", \"output\": \"short\"},\n",
    "    {\"input\": \"energetic\", \"output\": \"lethargic\"},\n",
    "    {\"input\": \"sunny\", \"output\": \"gloomy\"},\n",
    "    {\"input\": \"windy\", \"output\": \"calm\"},\n",
    "]\n",
    "\n",
    "\n",
    "# Build a semantic-similarity selector from the given examples\n",
    "example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
    "    examples,                          # the list of examples to choose from\n",
    "    embeddings_model,                # embedding class used to measure semantic similarity\n",
    "    Chroma,                            # VectorStore class that stores embeddings and runs similarity search\n",
    "    k=1                                # number of examples to select\n",
    ")\n",
    "\n",
    "# Create a FewShotPromptTemplate object\n",
    "similar_prompt = FewShotPromptTemplate(\n",
    "    example_selector=example_selector,  # an ExampleSelector instead of fixed examples\n",
    "    example_prompt=example_prompt,      # the prompt template defined above\n",
    "    prefix=\"Give the antonym of every input\", # prefix template\n",
    "    suffix=\"Input: {adjective}\\nOutput:\",     # suffix template\n",
    "    input_variables=[\"adjective\"],           # names of the input variables\n",
    ")\n",
    "\n",
    "# The input is a feeling, so the happy/sad example should be selected.\n",
    "print(\"当前输入的是一种感受，所以应该选择 happy/sad 的示例\")\n",
    "print(similar_prompt.format(adjective=\"worried\"))\n",
    "# The input is a measurement, so the tall/short example should be selected.\n",
    "print(\"当前输入是一种度量，所以应该选择 tall/short的示例\")\n",
    "print(similar_prompt.format(adjective=\"long\"))\n",
    "print(\"当前输入是一种天气状况，所以应该选择 windy/calm的示例\")\n",
    "print(similar_prompt.format(adjective=\"rain\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "881666ad-9aa4-4731-b57e-4c27d9eb1b81",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "hlf_env",
   "language": "python",
   "name": "hlf_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
