{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "## 初识LangChain"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f99e35730a4d9b3f"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "from dotenv import load_dotenv\n",
    "import os\n",
    "\n",
    "# Load environment variables (MOONSHOT_API_KEY) from a local .env file\n",
    "load_dotenv()\n",
    "\n",
    "# The OpenAI API is not reachable from mainland China, so use Moonshot's API instead\n",
    "# (OpenAI-compatible endpoint, so the ChatOpenAI client works unchanged)\n",
    "llm = ChatOpenAI(\n",
    "    base_url=\"https://api.moonshot.cn/v1/\",\n",
    "    api_key=os.getenv(\"MOONSHOT_API_KEY\"),\n",
    "    model_name=\"moonshot-v1-8k\",\n",
    "    temperature=0.3\n",
    ")\n",
    "\n",
    "# Direct invocation: send a plain string, get an AIMessage back\n",
    "llm.invoke(\"Hello, how are you?\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "2ca4ef057babacc5",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## prompt"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "7fa80db211b9e843"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "import os\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "# The OpenAI API is not reachable from mainland China, so use Moonshot's API instead\n",
    "llm = ChatOpenAI(\n",
    "    base_url=\"https://api.moonshot.cn/v1/\",\n",
    "    api_key=os.getenv(\"MOONSHOT_API_KEY\"),\n",
    "    model_name=\"moonshot-v1-8k\",\n",
    "    temperature=0.3\n",
    ")\n",
    "\n",
    "# Prompt example 1: fixed system message + user placeholder\n",
    "prompt = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"You are world class technical documentation writer.\"),\n",
    "    (\"user\", \"{input}\")\n",
    "])\n",
    "\n",
    "# LCEL order matters: the prompt must come FIRST (prompt | llm).\n",
    "# The original `llm | prompt` would send the raw input dict to the model and fail.\n",
    "chain = prompt | llm\n",
    "\n",
    "chain.invoke({\"input\": \"how can langsmith help with testing?\"})"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "21c050a7b7a414c7",
   "execution_count": null
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "[SystemMessage(content='Translate the following into italian:'),\n HumanMessage(content='hi')]"
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "import os\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "# The OpenAI API is not reachable from mainland China, so use Moonshot's API instead\n",
    "llm = ChatOpenAI(\n",
    "    base_url=\"https://api.moonshot.cn/v1/\",\n",
    "    api_key=os.getenv(\"MOONSHOT_API_KEY\"),\n",
    "    model_name=\"moonshot-v1-8k\",\n",
    "    temperature=0.3\n",
    ")\n",
    "\n",
    "# Prompt example 2: parameterized system message ({language}) plus user placeholder ({text})\n",
    "system_template = \"Translate the following into {language}:\"\n",
    "\n",
    "prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", system_template), \n",
    "    (\"user\", \"{text}\")\n",
    "])\n",
    "\n",
    "# Invoking the template alone only fills in the placeholders; no LLM call happens here\n",
    "result = prompt_template.invoke({\"language\": \"italian\", \"text\": \"hi\"})\n",
    "\n",
    "result\n",
    "# ChatPromptValue(messages=[SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')])\n",
    "\n",
    "\n",
    "# Only the last expression is displayed by the notebook: the message list view of the prompt value\n",
    "result.to_messages()\n",
    "# [SystemMessage(content='Translate the following into italian:'), \n",
    "# HumanMessage(content='hi')]"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-07-19T05:37:23.115617Z",
     "start_time": "2024-07-19T05:37:23.049826Z"
    }
   },
   "id": "c6ff39b60efc83fb",
   "execution_count": 8
  },
  {
   "cell_type": "markdown",
   "source": [
    "##  parser"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "84223c0f5363b519"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "import os\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "# 1. model\n",
    "# The OpenAI API is not reachable from mainland China, so use Moonshot's API instead\n",
    "llm = ChatOpenAI(\n",
    "    base_url=\"https://api.moonshot.cn/v1/\",\n",
    "    api_key=os.getenv(\"MOONSHOT_API_KEY\"),\n",
    "    model_name=\"moonshot-v1-8k\",\n",
    "    temperature=0.3\n",
    ")\n",
    "\n",
    "# 2. prompt\n",
    "prompt = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"You are world class technical documentation writer.\"),\n",
    "    (\"user\", \"{input}\")\n",
    "])\n",
    "\n",
    "# 3. parser: unwraps the chat model's AIMessage into a plain string\n",
    "parser = StrOutputParser()\n",
    "\n",
    "# 4. chain: LCEL pipeline — prompt fills the template, llm generates, parser extracts text\n",
    "chain = prompt | llm | parser\n",
    "\n",
    "chain.invoke({\"input\": \"how can langsmith help with testing?\"})"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1c66a425338effef",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 检索链\n",
    "\n",
    "> index ? \n",
    "> Retrieval ? \n",
    "> Retrieval augmented generation (RAG)  检索增强生成\n",
    "\n",
    "为了正确回答原始问题（“langsmith如何帮助测试？”），我们需要为LLM提供附加上下文。\n",
    "我们可以通过检索来实现这一点。当您有过多数据需要传递给LLM时，可使用检索工具仅检索相关数据并传递给LLM。\n",
    "在此过程中，我们将从检索器中查找相关文档，然后将它们传递给提示符。\n",
    "检索器可以由任何东西支持: 一个SQL表、互联网等\n",
    "但在本例中，我们将填充一个向量存储并将其用作检索器。有关向量存储的更多信息，请参阅[此文档](https://python.langchain.com/v0.2/docs/concepts/#retrieval)。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a6fdd0528e733aaa"
  },
  {
   "cell_type": "markdown",
   "source": [
    "首先，需要加载要索引的数据。为此，我们将使用WebBaseLoader。这需要安装[BeautifulSoup](https://beautiful-soup-4.readthedocs.io/en/latest/)\n",
    "```shell\n",
    "pip install beautifulsoup4\n",
    "```\n",
    "\n",
    "然后，我们可以导入并使用WebBaseLoader\n",
    "```python\n",
    "from langchain_community.document_loaders import WebBaseLoader\n",
    "loader = WebBaseLoader(\"https://docs.smith.langchain.com/user_guide\")\n",
    " \n",
    "docs = loader.load()\n",
    "```\n",
    "\n",
    "接下来，我们需要将其索引到向量存储中。这需要一些组件，即[嵌入模型](https://python.langchain.com/v0.2/docs/concepts/#embedding-models)和[向量存储](https://python.langchain.com/v0.2/docs/concepts/#vector-stores)。\n",
    "\n",
    "**嵌入模型 embedding model:**\n",
    "- OpenAIEmbeddings\n",
    "- OllamaEmbeddings\n",
    "- CohereEmbeddings\n",
    "- HuggingFaceEmbeddings\n",
    "- SentenceTransformerEmbeddings\n",
    "\n",
    "```python\n",
    "# OpenAIEmbeddings\n",
    "from langchain_openai import OpenAIEmbeddings\n",
    "embeddings = OpenAIEmbeddings()\n",
    "\n",
    "# OllamaEmbeddings\n",
    "from langchain_ollama import OllamaEmbeddings\n",
    "embeddings = OllamaEmbeddings()\n",
    "\n",
    "# CohereEmbeddings\n",
    "from langchain_cohere import CohereEmbeddings\n",
    "embeddings = CohereEmbeddings()\n",
    "\n",
    "# HuggingFaceEmbeddings\n",
    "from langchain_huggingface import HuggingFaceEmbeddings\n",
    "embeddings = HuggingFaceEmbeddings()\n",
    "\n",
    "# SentenceTransformerEmbeddings\n",
    "from langchain_community.embeddings import SentenceTransformerEmbeddings\n",
    "embeddings = SentenceTransformerEmbeddings()\n",
    "```\n",
    "\n",
    "**向量存储：**\n",
    "- Chroma\n",
    "- FAISS\n",
    "- Pinecone\n",
    "- Weaviate\n",
    "- Qdrant\n",
    "- ElasticSearch\n",
    "- Supabase\n",
    "- Milvus\n",
    "- Redis\n",
    "\n",
    "安装向量存储依赖库\n",
    "```shell\n",
    "pip install chromadb\n",
    "pip install faiss-cpu\n",
    "pip install pinecone-client\n",
    "pip install weaviate-client\n",
    "pip install qdrant-client\n",
    "pip install elasticsearch\n",
    "pip install supabase\n",
    "pip install pymilvus\n",
    "pip install redis\n",
    "```\n",
    "\n",
    "调用向量存储库\n",
    "```python\n",
    "# Chroma\n",
    "from langchain_chroma import Chroma\n",
    "docsearch = Chroma.from_documents(docs, embeddings)\n",
    "\n",
    "# FAISS\n",
    "from langchain_community.vectorstores import FAISS\n",
    "docsearch = FAISS.from_documents(docs, embeddings)\n",
    "\n",
    "# Pinecone\n",
    "from langchain_pinecone import PineconeVectorStore\n",
    "docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=\"my-index\")\n",
    "\n",
    "# Weaviate\n",
    "from langchain_weaviate import WeaviateVectorStore\n",
    "docsearch = WeaviateVectorStore.from_documents(docs, embeddings)\n",
    "\n",
    "# Qdrant\n",
    "from langchain_qdrant import Qdrant\n",
    "docsearch = Qdrant.from_documents(docs, embeddings)\n",
    "\n",
    "# ElasticSearch\n",
    "from langchain_elasticsearch import ElasticsearchStore\n",
    "docsearch = ElasticsearchStore.from_documents(docs, embeddings)\n",
    "\n",
    "# Supabase\n",
    "from langchain_supabase import Supabase\n",
    "docsearch = Supabase.from_documents(docs, embeddings)\n",
    "\n",
    "# Milvus\n",
    "from langchain_milvus import Milvus\n",
    "docsearch = Milvus.from_documents(docs, embeddings)\n",
    "\n",
    "# Redis\n",
    "from langchain_redis import Redis\n",
    "docsearch = Redis.from_documents(docs, embeddings)\n",
    "```"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6d10fa5c085716c"
  },
  {
   "cell_type": "markdown",
   "source": [
    "**使用嵌入模型和向量存储库构建检索器**"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "3454558ccd2a15de"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "# Using FAISS as the example vector store\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
    "from langchain_community.document_loaders import WebBaseLoader\n",
    "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain.chains.combine_documents import create_stuff_documents_chain\n",
    "from langchain_core.documents import Document\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "import os\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "# Fix: the original cell used `llm` and `ChatPromptTemplate` without defining/importing\n",
    "# them, so it raised NameError on a fresh kernel. Define the LLM as in earlier cells.\n",
    "# The OpenAI API is not reachable from mainland China, so use Moonshot's API instead\n",
    "llm = ChatOpenAI(\n",
    "    base_url=\"https://api.moonshot.cn/v1/\",\n",
    "    api_key=os.getenv(\"MOONSHOT_API_KEY\"),\n",
    "    model_name=\"moonshot-v1-8k\",\n",
    "    temperature=0.3\n",
    ")\n",
    "\n",
    "# Load the source documents from the web\n",
    "loader = WebBaseLoader(\"https://docs.smith.langchain.com/user_guide\")\n",
    "docs = loader.load()\n",
    "\n",
    "# Create the embedding model (requires OPENAI_API_KEY; swap for a local\n",
    "# embedding model if the OpenAI endpoint is unreachable)\n",
    "embeddings = OpenAIEmbeddings()\n",
    "\n",
    "# Split the documents into chunks small enough to embed\n",
    "text_splitter = RecursiveCharacterTextSplitter()\n",
    "documents = text_splitter.split_documents(docs)\n",
    "# Index the chunks into the FAISS vector store\n",
    "vector = FAISS.from_documents(documents, embeddings)\n",
    "\n",
    "# Prompt that restricts the answer to the provided context\n",
    "prompt = ChatPromptTemplate.from_template(\"\"\"Answer the following question based only on the provided context:\n",
    " \n",
    "<context>\n",
    "{context}\n",
    "</context>\n",
    " \n",
    "Question: {input}\"\"\")\n",
    "\n",
    "# Chain that \"stuffs\" the documents into the prompt's {context} slot and calls the LLM\n",
    "document_chain = create_stuff_documents_chain(llm, prompt)\n",
    "\n",
    "# Run it with a manually supplied context document (no retriever yet)\n",
    "document_chain.invoke({\n",
    "    \"input\": \"how can langsmith help with testing?\",\n",
    "    \"context\": [Document(page_content=\"langsmith can let you visualize test results\")]\n",
    "})"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ab317598f58a8c6d"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
