{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Install the required dependencies first:\n",
    "\n",
    "```bash\n",
    "pip install openai langchain-openai zhipuai\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# Map locally-named env vars onto the names the OpenAI/LangChain clients read.\n",
    "# NOTE(review): `os.environ[...] = None` raises TypeError when a variable is\n",
    "# unset — presumably CHAT_GPT_APIKEY / CHAT_GPT_APIURL are always exported\n",
    "# before running this notebook; confirm.\n",
    "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"CHAT_GPT_APIKEY\")\n",
    "os.environ[\"OPENAI_API_BASE\"] = os.getenv(\"CHAT_GPT_APIURL\")\n",
    "\n",
    "# ZhipuAI (GLM) API key, used by later cells.\n",
    "zhipu_api_key = os.getenv(\"CHATGLM_ZHIPU_APIKEY\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# Smoke test: one-shot chat completion via the OpenAI-compatible endpoint\n",
    "# configured through OPENAI_API_KEY / OPENAI_API_BASE above.\n",
    "llm = ChatOpenAI()\n",
    "res = llm.invoke(\"Unix和Linux系统有啥区别\")\n",
    "print(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# LCEL pipeline: prompt template -> chat model -> plain-string parser.\n",
    "xiaohongshu_prompt = ChatPromptTemplate.from_template(\"请根据下面的主题写一篇小红书营销的短文： {topic}\")\n",
    "chat_model = ChatOpenAI()\n",
    "to_text = StrOutputParser()\n",
    "\n",
    "chain = xiaohongshu_prompt | chat_model | to_text\n",
    "\n",
    "topic_input = {\"topic\": \"喜茶新店开业\"}\n",
    "\n",
    "# Blocking call: the whole completion is returned at once.\n",
    "print(chain.invoke(topic_input))\n",
    "print('-----------------------------------------------')\n",
    "# Streaming call: print each token chunk as it arrives.\n",
    "for piece in chain.stream(topic_input):\n",
    "    print(piece, end=\"\", flush=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ZhipuAI GLM-4 console / API keys: https://open.bigmodel.cn\n",
    "from zhipuai import ZhipuAI\n",
    "\n",
    "# Non-streaming chat completion against the hosted GLM-4 model.\n",
    "client = ZhipuAI(api_key=zhipu_api_key)\n",
    "prompt = \"以色列为什么喜欢战争?\"\n",
    "resp = client.chat.completions.create(\n",
    "    model=\"glm-4\", \n",
    "    messages=[\n",
    "            {\"role\": \"user\", \"content\": '你好'},\n",
    "            {\"role\": \"assistant\", \"content\": \"我是人工智能助手\"},\n",
    "            # The two messages above are (optional) chat history.\n",
    "            {\"role\": \"user\", \"content\": prompt}\n",
    "        ]\n",
    ")\n",
    "\n",
    "# Bare last expression: the answer text is the displayed cell output.\n",
    "resp.choices[0].message.content\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from zhipuai import ZhipuAI\n",
    "\n",
    "# Streaming chat completion: stream=True yields incremental delta chunks.\n",
    "client = ZhipuAI(api_key=zhipu_api_key)\n",
    "prompt = \"glm-4原理是什么?使用了多少参数进行训练?\"  # BUGFIX: typo \"gml-4\" -> \"glm-4\"\n",
    "resp = client.chat.completions.create(\n",
    "    model=\"glm-4\", \n",
    "    messages=[\n",
    "            {\"role\": \"user\", \"content\": '你好'},\n",
    "            {\"role\": \"assistant\", \"content\": \"我是人工智能助手\"},\n",
    "            # The two messages above are (optional) chat history.\n",
    "            {\"role\": \"user\", \"content\": prompt}\n",
    "        ],\n",
    "    stream=True\n",
    ")\n",
    "\n",
    "# Each chunk carries only the newly generated delta text.\n",
    "for chunk in resp:\n",
    "    print(chunk.choices[0].delta.content, end=\"\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the ZhipuAI SDK behind LangChain's LLM interface so GLM-4 can be used\n",
    "# like any other LangChain model; other vendors can be wrapped the same way.\n",
    "from langchain.llms.base import LLM\n",
    "from zhipuai import ZhipuAI\n",
    "from langchain_core.messages.ai import AIMessage\n",
    "\n",
    "class ChatGLM4(LLM):\n",
    "    \"\"\"LangChain LLM wrapper around the hosted GLM-4 chat API.\"\"\"\n",
    "\n",
    "    # Placeholder chat history (invoke/stream take history explicitly).\n",
    "    history = []\n",
    "    client: object = None  # ZhipuAI client, created in __init__\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.client = ZhipuAI(api_key=zhipu_api_key)\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        return \"ChatGLM4\"\n",
    "\n",
    "    def invoke(self, prompt, history=None):\n",
    "        \"\"\"Send `prompt` (plus optional chat `history`) and return an AIMessage.\n",
    "\n",
    "        BUGFIX: the default used to be `history=[]`, a shared mutable default\n",
    "        that silently accumulated messages across calls.\n",
    "        \"\"\"\n",
    "        if history is None:\n",
    "            history = []\n",
    "\n",
    "        history.append({\"role\": \"user\", \"content\": prompt})\n",
    "        response = self.client.chat.completions.create(\n",
    "            model=\"glm-4\", messages=history\n",
    "        )\n",
    "        result = response.choices[0].message.content\n",
    "        return AIMessage(content=result)\n",
    "\n",
    "    def _call(self, prompt, history=None):\n",
    "        # LangChain's internal entry point; delegates to invoke().\n",
    "        return self.invoke(prompt, history)\n",
    "\n",
    "    def stream(self, prompt, history=None):\n",
    "        \"\"\"Yield response text chunks for `prompt` via the streaming API.\n",
    "\n",
    "        BUGFIX: same mutable-default fix as invoke().\n",
    "        \"\"\"\n",
    "        if history is None:\n",
    "            history = []\n",
    "        history.append({\"role\": \"user\", \"content\": prompt})\n",
    "        response = self.client.chat.completions.create(\n",
    "            model=\"glm-4\", messages=history, stream=True\n",
    "        )\n",
    "        for chunk in response:\n",
    "            yield chunk.choices[0].delta.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "custom_glm = ChatGLM4()\n",
    "# custom_glm.invoke(\"讲一个减肥的笑话\")\n",
    "\n",
    "# FIX: loop variable renamed from `str`, which shadowed the builtin type.\n",
    "for chunk in custom_glm.stream(\"如何鼓励一个胖子减肥?\"):\n",
    "    print(chunk, end=\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a local ChatGLM3 checkpoint and expose it through LangChain's LLM interface.\n",
    "from langchain.llms.base import LLM\n",
    "from transformers import AutoTokenizer, AutoModel, AutoConfig\n",
    "from langchain_core.messages.ai import AIMessage\n",
    "\n",
    "class ChatGLM3(LLM):\n",
    "    \"\"\"LangChain wrapper around a locally loaded chatglm3 model.\"\"\"\n",
    "\n",
    "    max_token: int=8192\n",
    "    do_sample: bool=True\n",
    "    # NOTE(review): keeps the original (misspelled) public field name\n",
    "    # \"tempreature\" so existing callers are not broken.\n",
    "    tempreature: float=0.3\n",
    "    top_p = 0.0  # declared but never forwarded to the model\n",
    "    tokenizer: object = None\n",
    "    model: object = None\n",
    "    history = []\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self):\n",
    "        return \"ChatGLM3\"\n",
    "\n",
    "    def load_model(self, modelPath=None):\n",
    "        \"\"\"Load tokenizer and model weights from `modelPath`.\"\"\"\n",
    "        tokenizer = AutoTokenizer.from_pretrained(modelPath, trust_remote_code=True, use_fast=True)\n",
    "        model = AutoModel.from_pretrained(modelPath, trust_remote_code=True, device_map=\"auto\")\n",
    "\n",
    "        model = model.eval()  # inference mode only\n",
    "\n",
    "        self.model = model\n",
    "        self.tokenizer = tokenizer\n",
    "\n",
    "    def _call(self, prompt, config={}, history=None):\n",
    "        # BUGFIX: used to call self.invoke(prompt, history), which bound the\n",
    "        # history list to the `config` parameter and silently dropped it.\n",
    "        return self.invoke(prompt, config, history)\n",
    "\n",
    "    def invoke(self, prompt, config={}, history=None):\n",
    "        \"\"\"Run one chat turn and return an AIMessage; records the new history.\n",
    "\n",
    "        BUGFIX: `history` default changed from a shared mutable [] to None.\n",
    "        \"\"\"\n",
    "        if history is None:\n",
    "            history = []\n",
    "        if not isinstance(prompt, str):\n",
    "            prompt = prompt.to_string()  # e.g. a LangChain PromptValue\n",
    "\n",
    "        response, history = self.model.chat(\n",
    "            self.tokenizer,\n",
    "            prompt,\n",
    "            history=history,\n",
    "            do_sample=self.do_sample,\n",
    "            max_length=self.max_token,\n",
    "            temperature=self.tempreature\n",
    "        )\n",
    "        self.history = history\n",
    "        return AIMessage(content=response)\n",
    "\n",
    "    def stream(self, prompt, config={}, history=None):\n",
    "        \"\"\"Yield incremental text chunks for `prompt`.\n",
    "\n",
    "        BUGFIX: `issubclass(prompt, str)` raised TypeError for instances\n",
    "        (it expects a class); `isinstance` is the correct check.\n",
    "        \"\"\"\n",
    "        if not isinstance(prompt, str):\n",
    "            prompt = prompt.to_string()\n",
    "\n",
    "        # stream_chat yields cumulative text; emit only the newly added suffix.\n",
    "        preResponse = \"\"\n",
    "        for response, new_history in self.model.stream_chat(self.tokenizer, prompt):\n",
    "            if preResponse == \"\":\n",
    "                result = response\n",
    "            else:\n",
    "                result = response[len(preResponse):]\n",
    "            preResponse = response\n",
    "            yield result\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the wrapper and load the local weights.\n",
    "llm = ChatGLM3()\n",
    "# NOTE(review): hardcoded absolute local path — adjust for your machine.\n",
    "model_path = \"/Users/linjk/Documents/usr/models/language/chatglm3-6b/ZhipuAPI/chatglm3-6b\"\n",
    "llm.load_model(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-shot query; the returned AIMessage is the displayed cell output.\n",
    "llm.invoke(\"中国的首都是哪里\")\n",
    "# Streaming variant:\n",
    "# for resp in llm.stream(\"写一首冬天的诗\"):\n",
    "#     print(resp, end='')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chat prompt template demo: a role-played system prompt plus a human slot.\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.messages import SystemMessage, HumanMessage\n",
    "\n",
    "chat_template = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", \"\"\"你是一只很粘人的小猫，你叫{name}。我是你的主人，你每天都有和我说不完的话，下面请开启我们的聊天\n",
    "         要求：\n",
    "         1. 你的语气要像一只猫，回话的过程中可以夹杂喵喵猫的语气词\n",
    "         2. 你对生活的观察有很独特的视角，一些想法是我在人类身上很难看到的\n",
    "         3. 你的语气很可爱，既会认真倾听我的话，又会不断开启新话题\n",
    "         下面你从迎接我下班回家开始开启我们今天的对话\"\"\"),\n",
    "        (\"human\", \"{user_input}\")\n",
    "    ]\n",
    ")\n",
    "# format_messages renders the template into a concrete message list.\n",
    "message = chat_template.format_messages(name=\"哈哈\", user_input=\"想我了吗\")\n",
    "print(message)\n",
    "\n",
    "# invoke produces a ChatPromptValue, usable directly as model input.\n",
    "res = chat_template.invoke({\"name\": \"哈哈\", \"user_input\": \"想我了吗\"})\n",
    "print(res)\n",
    "chat = ChatOpenAI(temperature=0.9)\n",
    "res = chat.invoke(res)\n",
    "print(res)\n",
    "# Append replies to the template to keep a running conversation:\n",
    "# chat_template.append(res)\n",
    "# chat_template.append(HumanMessage(content=\"今天手机被偷了\"))\n",
    "# res = chat_template.invoke({\"name\": \"哈哈\", \"user_input\": \"想我了吗\"})\n",
    "# print(res)\n",
    "# chat = ChatOpenAI(temperature=0.9)\n",
    "# res = chat.invoke(res)\n",
    "# print(res)\n",
    "# NOTE(review): LLMChain and .run() are deprecated in newer LangChain;\n",
    "# the LCEL form `chat_template | chat` is the modern replacement.\n",
    "from langchain.chains import LLMChain\n",
    "print('-------------')\n",
    "llmchain = LLMChain(llm=chat, prompt=chat_template)\n",
    "res = llmchain.run({\"name\": \"哈哈\", \"user_input\": \"今天升职加薪了\"})\n",
    "print(res)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Few-shot prompting with a length-based example selector.\n",
    "# NOTE(review): relies on `chat` defined in an earlier cell — run that first.\n",
    "\n",
    "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
    "from langchain.prompts.example_selector import LengthBasedExampleSelector\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "\n",
    "# Example pairs for an antonym task\n",
    "examples = [\n",
    "    {\"input\": \"开心\", \"output\": \"伤心\"},\n",
    "    {\"input\": \"高\", \"output\": \"矮\"},\n",
    "    {\"input\": \"精力充沛\", \"output\": \"没精打采\"},\n",
    "    {\"input\": \"粗\", \"output\": \"细\"}\n",
    "]\n",
    "\n",
    "example_prompt = PromptTemplate(\n",
    "    input_variables=[\"input\", \"output\"],\n",
    "    template=\"Input: {input}\\nOutput: {output}\"\n",
    ")\n",
    "\n",
    "example_selector = LengthBasedExampleSelector(\n",
    "    # Candidate examples to select from\n",
    "    examples=examples,\n",
    "    # PromptTemplate used to render each example\n",
    "    example_prompt=example_prompt,\n",
    "    # Maximum combined length of the formatted examples\n",
    "    max_length=25\n",
    ")\n",
    "\n",
    "dynamic_prompt = FewShotPromptTemplate(\n",
    "    example_selector=example_selector,\n",
    "    example_prompt=example_prompt,\n",
    "    prefix=\"给出每个输入的反义词\",\n",
    "    suffix=\"Input: {adjective}\\nOutput:\",\n",
    "    input_variables=[\"adjective\"]\n",
    ")\n",
    "\n",
    "print(dynamic_prompt.format(adjective=\"big\"))\n",
    "\n",
    "output_parser = StrOutputParser()\n",
    "chain = dynamic_prompt | chat | output_parser\n",
    "chain.invoke({\"adjective\": \"热情\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LLM response caching: identical prompts are served from an in-memory cache.\n",
    "# NOTE(review): rebinds `llm`, shadowing the ChatGLM3 instance from earlier cells.\n",
    "\n",
    "from langchain.globals import set_llm_cache\n",
    "from langchain.cache import InMemoryCache\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "set_llm_cache(InMemoryCache())\n",
    "llm = OpenAI()\n",
    "res = llm.invoke(\"一天有多少小时？\")\n",
    "print(res)\n",
    "# The second identical call hits the cache and returns much faster.\n",
    "res = llm.invoke(\"一天有多少小时？\")\n",
    "print(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a FAISS vector store from a single text file and query it.\n",
    "from langchain_community.document_loaders import TextLoader\n",
    "\n",
    "loader = TextLoader(\"../knowledges/demo.txt\")\n",
    "\n",
    "doc = loader.load()\n",
    "\n",
    "print(doc)\n",
    "\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_community.embeddings import HuggingFaceEmbeddings\n",
    "\n",
    "# NOTE(review): hardcoded absolute local path — adjust for your machine.\n",
    "emd = HuggingFaceEmbeddings(model_name=\"/Users/linjk/Documents/usr/models/embedding/bge-large-zh-v1.5/Xorbits/bge-large-zh-v1.5\")\n",
    "vsdb = FAISS.from_documents(doc, emd)\n",
    "print(vsdb)\n",
    "\n",
    "# NOTE(review): the first search's result is discarded; only the last\n",
    "# expression (search with scores) is displayed as the cell output.\n",
    "vsdb.similarity_search(\"这是什么\")\n",
    "vsdb.similarity_search_with_score(\"这是什么\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# pip install unstructured tqdm nltk\n",
    "from langchain_community.document_loaders import DirectoryLoader\n",
    "\n",
    "# Recursively load every CSV under ../knowledges, with a progress bar.\n",
    "loader = DirectoryLoader(\"../knowledges\", glob=\"**/*.csv\", show_progress=True)\n",
    "docs = loader.load()\n",
    "print(docs)\n",
    "\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_community.embeddings import HuggingFaceEmbeddings\n",
    "\n",
    "# NOTE(review): hardcoded absolute local path — adjust for your machine.\n",
    "emd = HuggingFaceEmbeddings(model_name=\"/Users/linjk/Documents/usr/models/embedding/bge-large-zh-v1.5/Xorbits/bge-large-zh-v1.5\")\n",
    "vsdb = FAISS.from_documents(docs, emd)\n",
    "print(vsdb)\n",
    "\n",
    "# vsdb.similarity_search_with_score(\"大神是谁\")\n",
    "\n",
    "# MMR retriever returning the single most relevant document.\n",
    "retriever = vsdb.as_retriever(\n",
    "    search_type=\"mmr\",\n",
    "    search_kwargs={\"k\": 1}\n",
    ")\n",
    "\n",
    "res = retriever.get_relevant_documents(\"大神是谁\")\n",
    "print(res)\n",
    "# FIX: removed dangling unused `from langchain_openai import ChatOpenAI`;\n",
    "# the next cell imports it where it is actually used."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summarize a local PDF: load pages, concatenate their text, ask the model.\n",
    "from langchain_community.document_loaders import PyPDFLoader\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "\n",
    "loader = PyPDFLoader(\"../knowledges/Linux的高级路由和流量控制HOWTO.pdf\")\n",
    "\n",
    "pages = loader.load_and_split()\n",
    "\n",
    "# Idiom/perf: join page texts in one pass instead of quadratic `docs += ...`;\n",
    "# also removed the no-op bare `docs` expression that displayed nothing.\n",
    "docs = \"\".join(item.page_content for item in pages)\n",
    "\n",
    "template = \"\"\"\n",
    "```\n",
    "{context}\n",
    "```\n",
    "总结上面的PDF内容\n",
    "\"\"\"\n",
    "prpt = ChatPromptTemplate.from_template(template)\n",
    "\n",
    "model = ChatOpenAI()\n",
    "output_parser = StrOutputParser()\n",
    "\n",
    "chain = prpt | model | output_parser\n",
    "\n",
    "# Last expression: the summary string is the cell's displayed output.\n",
    "chain.invoke({\"context\": docs})"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
