{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Point the OpenAI SDK at a local OpenAI-compatible server.\n",
    "# Use setdefault so a real key/base already present in the environment is not clobbered.\n",
    "import os\n",
    "os.environ.setdefault(\"OPENAI_API_BASE\", \"http://192.168.2.47:8000/v1\")\n",
    "os.environ.setdefault(\"OPENAI_API_KEY\", \"xxx\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dense embedding model used by every vector store below (bge-large-zh v1.5).\n",
    "# EMBEDDING_MODEL_PATH makes the notebook portable off this one Windows box.\n",
    "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
    "\n",
    "embedding_model_path = os.environ.get(\"EMBEDDING_MODEL_PATH\", r\"E:\\models\\BAAIbge-large-zh-v1.5\")\n",
    "embedding = HuggingFaceEmbeddings(model_name=embedding_model_path,\n",
    "                                  model_kwargs={'device': 'cuda'})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the LLM and smoke-test it.\n",
    "# `predict_messages` is deprecated since LangChain 0.1.7 — use `invoke` instead\n",
    "# (this also removes the wall of deprecation warnings from the output).\n",
    "from langchain_community.llms.openai import OpenAI\n",
    "from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
    "\n",
    "llm = OpenAI(temperature=0, verbose=True, callbacks=[StreamingStdOutCallbackHandler()])\n",
    "llm.invoke('你好\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load and split documents"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the sales-pitch PDF; extract_images=True also OCRs embedded images.\n",
    "# (Removed unused imports: mimetypes, BaseLoader — neither is referenced anywhere.)\n",
    "from langchain_community.document_loaders import PyPDFium2Loader\n",
    "\n",
    "documents = PyPDFium2Loader(file_path=r'./华昭府销售说辞初稿-未审定0929.pdf',\n",
    "                            extract_images=True).load()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split into ~100-char chunks and tag each chunk with a stable integer id\n",
    "# so it can be matched between Milvus and Elasticsearch.\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "\n",
    "text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0, is_separator_regex=True)\n",
    "documents = text_splitter.split_documents(documents)\n",
    "for idx, doc in enumerate(documents):\n",
    "    doc.metadata['id'] = idx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Milvus vector store (IVF_FLAT index, COSINE metric).\n",
    "# Connection credentials come from the environment; defaults match the dev cluster.\n",
    "from langchain_community.vectorstores.milvus import Milvus\n",
    "\n",
    "vector_store = Milvus(\n",
    "    embedding_function=embedding,\n",
    "    collection_name='PolicyKnowledge_test',\n",
    "    connection_args={\n",
    "        'host': os.environ.get('MILVUS_HOST', '192.168.2.46'),\n",
    "        'port': os.environ.get('MILVUS_PORT', '19530'),\n",
    "        'user': os.environ.get('MILVUS_USER', 'root'),\n",
    "        'password': os.environ.get('MILVUS_PASSWORD', 'Milvus'),  # TODO: move real secret out of the notebook\n",
    "    },\n",
    "    index_params={'index_type': 'IVF_FLAT', 'metric_type': 'COSINE', 'params': {'nlist': 1024}},\n",
    "    search_params={'metric_type': 'COSINE', 'nprobe': 12, 'top_k': 10},\n",
    "    vector_field='vector',\n",
    "    drop_old=False,\n",
    "    auto_id=True,\n",
    "    primary_field='_id',\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[(Document(page_content='新房网签时间不定，一般在合同签订后的30个工作日以内，具体以售楼处和房管局为主。', metadata={'content': '网签', 'id': 45, 'pk': 448670017636795610}),\n",
       "  0.046220358461141586),\n",
       " (Document(page_content='有了网签合同就可以办理贷款', metadata={'content': '网签', 'id': 47, 'pk': 448670017636795612}),\n",
       "  0.03904831409454346),\n",
       " (Document(page_content='当本期登记人数超过房源数量的10倍，就会触发社保排序，社保时间长的优先获得。', metadata={'content': '社保', 'id': 38, 'pk': 448670017636795603}),\n",
       "  0.030087806284427643),\n",
       " (Document(page_content='8.5新政规定购房意向登记家庭社保缴纳时间自2006年1月起算，到最新登记月份的社保数', metadata={'content': '社保', 'id': 37, 'pk': 448670017636795602}),\n",
       "  0.029066890478134155),\n",
       " (Document(page_content='大学生可以参与摇号', metadata={'content': '大学生摇号', 'id': 7, 'pk': 448670017636795572}),\n",
       "  0.022340910509228706),\n",
       " (Document(page_content='一般交付后90天内办理产证', metadata={'content': '交付', 'id': 48, 'pk': 448670017636795613}),\n",
       "  0.0200367234647274),\n",
       " (Document(page_content='一般来说，资料齐全，在银行额度不紧张的情况下，2周内可以放贷', metadata={'content': '公积金贷款', 'id': 32, 'pk': 448670017636795597}),\n",
       "  0.015805482864379883),\n",
       " (Document(page_content='一般来说，只要资料齐全，在银行额度不紧张的情况下，2周以内可以放贷', metadata={'content': '贷款', 'id': 23, 'pk': 448670017636795588}),\n",
       "  0.015025313943624496),\n",
       " (Document(page_content='根据《关于进一步加强房地产市场调控的通知》中显示，购房意向登记家庭社保缴纳时间自2006年1月起算，按购房家庭成员中在本市限购范围内社保累计缴纳时间最长的一方计算，因工作调动等原因补缴社保，累计补缴社保不超过3个月（含）的，可计入。', metadata={'content': '社保', 'id': 41, 'pk': 448670017636795606}),\n",
       "  0.012664769776165485),\n",
       " (Document(page_content='杭州线上购房流程如下:(1）打开支付宝搜索并关注“公证云开盘”(生活号);(2）实名认证购房人的个人信息（—定要用登记时的主购房人信息登入);(3）线上签到;(4）观看选房直播;(5）进入选房界面，收藏房源;(6）成功选房认购出价;(7）成功支付定金即为最终认购成功;(8）订单查询;(9）至项目营销中心完成纸质认购协议的签订和缴纳首付款。', metadata={'content': '线上购房', 'id': 11, 'pk': 448670017636795576}),\n",
       "  0.012026671320199966)]"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check retrieval with a throwaway query; scores are cosine similarities.\n",
    "vector_store.similarity_search_with_score('你好', k=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Index the chunks into Milvus. Run once — re-running inserts every row again.\n",
    "vector_store.add_documents(documents=documents)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*(removed a duplicate `add_documents` cell — executing both copies inserted every chunk into Milvus twice)*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Query decomposition: ask the chat model to break a question into sub-questions,\n",
    "# one per line, and parse the reply into a list of lines.\n",
    "# BUG FIX: both templates started with four quotes (\"\"\"\"), which leaked a stray\n",
    "# literal '\"' character into the prompts. Dead commented-out Pydantic parser and\n",
    "# unused imports removed.\n",
    "from langchain.chains import LLMChain\n",
    "from langchain_core.output_parsers.list import ListOutputParser\n",
    "from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate\n",
    "from langchain_community.chat_models import ChatOpenAI\n",
    "from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
    "\n",
    "chatllm = ChatOpenAI(streaming=True, verbose=True)\n",
    "\n",
    "class LineListOutputParser(ListOutputParser):\n",
    "    \"\"\"Split the raw LLM reply into a list of lines (one sub-question per line).\"\"\"\n",
    "\n",
    "    def parse(self, text: str):\n",
    "        lines = text.strip().split(\"\\n\")\n",
    "        print(lines)\n",
    "        return lines\n",
    "\n",
    "systemMessage = SystemMessagePromptTemplate.from_template(template=\"\"\"接下来我会给你一个问题。我要你把它分解成一系列的子问题。每个子问题都应该包含解决它所需的所有信息。\n",
    "确保不要分解过多，也不要有任何无关紧要的子问题--我们会根据分解的简洁性、简明性和正确性来评估你。\n",
    "example:\n",
    "Question: What is Bitcoin?\n",
    "What is the purpose of Bitcoin?\n",
    "What does decentralized mean? \"\"\")\n",
    "userMessage = HumanMessagePromptTemplate.from_template(template=\"{question}\")\n",
    "chat_prompt = ChatPromptTemplate.from_messages([systemMessage, userMessage])\n",
    "\n",
    "deLLMChain = LLMChain(prompt=chat_prompt, output_parser=LineListOutputParser(), llm=chatllm,\n",
    "                      callbacks=[StreamingStdOutCallbackHandler()], verbose=True)\n",
    "for item in deLLMChain.stream({\"question\": \"杭州旅游应该准备什么东西？\"}):\n",
    "    print(item)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def custom_query(query_body: dict, query: str):\n",
    "    \"\"\"Replace the strategy-generated Elasticsearch body with a plain BM25 match.\n",
    "\n",
    "    Args:\n",
    "        query_body (dict): Body produced by the retrieval strategy (logged only).\n",
    "        query (str): User query string.\n",
    "    Returns:\n",
    "        dict: The query body actually sent to Elasticsearch.\n",
    "    \"\"\"\n",
    "    print(\"Query Retriever created by the retrieval strategy:\")\n",
    "    print(query_body)\n",
    "    print()\n",
    "\n",
    "    match_body = {\"query\": {\"match\": {\"text\": query}}}\n",
    "\n",
    "    print(\"Query thats actually used in Elasticsearch:\")\n",
    "    print(match_body)\n",
    "    print()\n",
    "\n",
    "    return match_body"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Elasticsearch store (dense vectors) plus a BM25 body builder for keyword search.\n",
    "# Credentials come from the environment — a real password was hardcoded here before.\n",
    "from langchain_elasticsearch import ElasticsearchStore\n",
    "from langchain.retrievers.multi_query import MultiQueryRetriever\n",
    "\n",
    "elastic_vector_search = ElasticsearchStore(\n",
    "    es_url=os.environ.get('ES_URL', 'http://192.168.2.46:9200'),\n",
    "    index_name='test_2',\n",
    "    embedding=embedding,\n",
    "    es_user=os.environ.get('ES_USER', 'elastic'),\n",
    "    es_password=os.environ.get('ES_PASSWORD', 'interdna'),  # TODO: rotate this leaked dev password\n",
    ")\n",
    "\n",
    "def bm25_query(search_query: str):\n",
    "    \"\"\"Build a plain BM25 `match` query against the `text` field.\"\"\"\n",
    "    print(\"==============bm25_query============\")\n",
    "    return {\n",
    "        \"query\": {\n",
    "            \"match\": {\n",
    "                \"text\": search_query,\n",
    "            },\n",
    "        },\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Index the same chunks into Elasticsearch (run once).\n",
    "elastic_vector_search.add_documents(documents=documents)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dense search restricted to the chunk whose metadata.id == 1.\n",
    "elastic_vector_search.similarity_search_with_score('位置', filter=[{\"term\": {\"metadata.id\": 1}}])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Multi-query expansion (via deLLMChain) layered on the BM25 body builder.\n",
    "base_es_retriever = elastic_vector_search.as_retriever(body_func=bm25_query)\n",
    "elastic_vector = MultiQueryRetriever(retriever=base_es_retriever, llm_chain=deLLMChain, parser_key=\"lines\")\n",
    "elastic_vector.invoke(\"位置\", k=4, custom_query=custom_query)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pure BM25 retriever over the index the documents were actually written to.\n",
    "# BUG FIX: index_name was 'test_' but the ElasticsearchStore above indexes into\n",
    "# 'test_2', so BM25 searched a different (likely empty) index.\n",
    "from langchain_elasticsearch.retrievers import ElasticsearchRetriever\n",
    "\n",
    "bm25_retriever = ElasticsearchRetriever.from_es_params(\n",
    "    index_name=\"test_2\",\n",
    "    body_func=bm25_query,\n",
    "    content_field='text',\n",
    "    url=os.environ.get('ES_URL', 'http://192.168.2.46:9200'),\n",
    "    username=os.environ.get('ES_USER', 'elastic'),\n",
    "    password=os.environ.get('ES_PASSWORD', 'interdna'),  # TODO: rotate this leaked dev password\n",
    ")\n",
    "\n",
    "# `get_relevant_documents` is deprecated — `invoke` is the current entry point.\n",
    "bm25_retriever.invoke(\"位置\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Combine keyword (BM25) and dense retrieval; both sides get multi-query expansion.\n",
    "# BUG FIX: the original re-bound `bm25_retriever = MultiQueryRetriever(retriever=bm25_retriever, ...)`,\n",
    "# which wraps the wrapper every time the cell is re-run (hidden kernel state).\n",
    "# Unused imports (LLMChainFilter, BM25Retriever) and dead commented code removed.\n",
    "from langchain.retrievers.multi_query import MultiQueryRetriever\n",
    "from langchain.retrievers import EnsembleRetriever\n",
    "\n",
    "vector_retriever = MultiQueryRetriever(\n",
    "    retriever=vector_store.as_retriever(search_params={'metric_type': 'COSINE', 'nprobe': 12, 'top_k': 5}),\n",
    "    llm_chain=deLLMChain,\n",
    "    parser_key=\"lines\",\n",
    ")\n",
    "bm25_mq_retriever = MultiQueryRetriever(retriever=bm25_retriever, llm_chain=deLLMChain, parser_key=\"lines\")\n",
    "ensemble_retriever = EnsembleRetriever(\n",
    "    retrievers=[bm25_mq_retriever, vector_retriever], weights=[0.7, 0.3],\n",
    ")\n",
    "vector_retriever.invoke(input='小学')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Spot-check the keyword retriever on its own.\n",
    "bm25_retriever.invoke('小学')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): bge-reranker-large is a cross-encoder reranker, not an embedding\n",
    "# model — loading it with HuggingFaceEmbeddings looks wrong, and `reRankerEmbedding`\n",
    "# is never used below. Consider deleting this cell — TODO confirm.\n",
    "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
    "\n",
    "reRankerEmbedding = HuggingFaceEmbeddings(model_name=r\"E:\\models\\BAAIbge-reranker-large\",\n",
    "                                          model_kwargs={'device': \"cuda\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download/load the FlashRank cross-encoder used for reranking.\n",
    "# Cache under the notebook directory ('./opt'), not the filesystem root.\n",
    "# (Removed unused FlashrankRerank import — it is imported where it is used.)\n",
    "from flashrank import Ranker\n",
    "\n",
    "ranker = Ranker(model_name=\"ms-marco-MiniLM-L-12-v2\", cache_dir=\"./opt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rerank the ensemble's candidates with FlashRank and keep the best ones.\n",
    "# Reuses `ranker` from the previous cell — the original re-created it with an\n",
    "# inconsistent absolute cache_dir of \"/opt\".\n",
    "from langchain.retrievers import ContextualCompressionRetriever\n",
    "from langchain.retrievers.document_compressors import FlashrankRerank\n",
    "\n",
    "compressor = FlashrankRerank(client=ranker)\n",
    "compression_retriever = ContextualCompressionRetriever(\n",
    "    base_compressor=compressor, base_retriever=ensemble_retriever\n",
    ")\n",
    "# `invoke` replaces the deprecated get_relevant_documents; bare last expression\n",
    "# uses the notebook's rich display instead of print().\n",
    "compressed_docs = compression_retriever.invoke(\"附近有哪些小学\")\n",
    "compressed_docs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prompt that stuffs retrieved context ahead of the user question.\n",
    "# BUG FIX: the template references both {context} and {question}, so both must be\n",
    "# declared in input_variables — the original listed only 'question'.\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "chatTemplate = PromptTemplate(\n",
    "    template=\"\"\"楼盘资料\\n{context} user\\n{question}\\n\n",
    "Assistant\\n\"\"\",\n",
    "    input_variables=['context', 'question'],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Final QA chain: compressed retrieval -> prompt -> LLM.\n",
    "# (Removed unused QAWithSourcesChain / RetrievalQAWithSourcesChain imports.)\n",
    "from langchain.chains import RetrievalQA\n",
    "from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
    "\n",
    "chain = RetrievalQA.from_llm(llm=llm, retriever=compression_retriever,\n",
    "                             callbacks=[StreamingStdOutCallbackHandler()],\n",
    "                             prompt=chatTemplate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ask the assembled chain a question end-to-end.\n",
    "chain.invoke({'query': \"有哪些学校\"})\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Notes / next steps\n",
    "\n",
    "- TODO: compare retrieval quality of BM25 vs dense vs ensemble vs reranked results."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chatglm2",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
