https://github.com/langchain-ai/langchain/blob/master/templates/rag-semi-structured/rag_semi_structured/__init__.py
from rag_semi_structured.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-semi-structured/rag_semi_structured/chain.py
# Load
import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnablePassthrough
from unstructured.partition.pdf import partition_pdf

# Path to docs
path = "docs"
raw_pdf_elements = partition_pdf(
    filename=path + "/LLaVA.pdf",
    # Unstructured first finds embedded image blocks
    extract_images_in_pdf=False,
    # Use layout model (YOLOX) to get bounding boxes (for tables) and find titles
    # Titles are any sub-section of the document
    infer_table_structure=True,
    # Post processing to aggregate text once we have the title
    chunking_strategy="by_title",
    # Chunking params to aggregate text blocks
    # Attempt to create a new chunk at 3800 chars
    # Attempt to keep chunks > 2000 chars
    max_characters=4000,
    new_after_n_chars=3800,
    combine_text_under_n_chars=2000,
    image_output_dir_path=path,
)

# Categorize by type
tables = []
texts = []
for element in raw_pdf_elements:
    if "unstructured.documents.elements.Table" in str(type(element)):
        tables.append(str(element))
    elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
        texts.append(str(element))

# Summarize
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()

# Apply
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})

# To save time / cost, only do text summaries if chunk sizes are large
# text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
# We can just assign text_summaries to the raw texts
text_summaries = texts

# Use multi vector retriever

# The vectorstore to use to index the child chunks
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())

# The storage layer for the parent documents
store = InMemoryStore()
id_key = "doc_id"

# The retriever (empty to start)
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    docstore=store,
    id_key=id_key,
)

# Add texts
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))

# Add tables
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
    Document(page_content=s, metadata={id_key: table_ids[i]})
    for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))

# RAG
# Prompt template
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""  # noqa: E501
prompt = ChatPromptTemplate.from_template(template)

# LLM
model = ChatOpenAI(temperature=0, model="gpt-4")

# RAG pipeline
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
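A quick, hedged usage sketch for this chain (not part of the template files): importing the package runs the ingestion above, so it assumes `docs/LLaVA.pdf` is present and `OPENAI_API_KEY` is set; the question string is only illustrative.

```python
# Minimal local invocation sketch for the rag-semi-structured chain.
from rag_semi_structured import chain

answer = chain.invoke("What benchmarks does the LLaVA paper report results on?")
print(answer)
```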
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/README.md
# rag-self-query

This template performs RAG using the self-query retrieval technique. The main idea is to let an LLM convert unstructured queries into structured queries. See the [docs for more on how this works](https://python.langchain.com/docs/modules/data_connection/retrievers/self_query).

## Environment Setup

In this template we'll use OpenAI models and an Elasticsearch vector store, but the approach generalizes to all LLMs/ChatModels and [a number of vector stores](https://python.langchain.com/docs/integrations/retrievers/self_query/).

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

To connect to your Elasticsearch instance, use the following environment variables:

```bash
export ELASTIC_CLOUD_ID=<CLOUD_ID>
export ELASTIC_USERNAME=<CLOUD_USERNAME>
export ELASTIC_PASSWORD=<CLOUD_PASSWORD>
```

For local development with Docker, use:

```bash
export ES_URL="http://localhost:9200"
docker run -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" docker.elastic.co/elasticsearch/elasticsearch:8.9.0
```

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U "langchain-cli[serve]"
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-self-query
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-self-query
```

And add the following code to your `server.py` file:

```python
from rag_self_query import chain

add_routes(app, chain, path="/rag-elasticsearch")
```

To populate the vector store with the sample data, run the following from the root of the directory:

```bash
python ingest.py
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-elasticsearch/playground](http://127.0.0.1:8000/rag-elasticsearch/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-self-query")
```
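As a hedged illustration of what self-query buys you (the question below is hypothetical and assumes `ingest.py` has already populated the index): a query that mixes semantics with metadata constraints lets the LLM emit a structured filter over fields such as `location` and `updated_at`, rather than relying on embedding similarity alone.

```python
# Sketch only: requires the environment variables above and a populated index.
from rag_self_query import chain

response = chain.invoke(
    {
        "question": "Which policies stored in sharepoint were updated after 2023-01-01?",
        "chat_history": [],
    }
)
print(response)
```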
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/ingest.py
import os

from langchain_community.document_loaders import JSONLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_elasticsearch import ElasticsearchStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD")
ES_URL = os.getenv("ES_URL", "http://localhost:9200")
ELASTIC_INDEX_NAME = os.getenv("ELASTIC_INDEX_NAME", "workspace-search-example")


def _metadata_func(record: dict, metadata: dict) -> dict:
    metadata["name"] = record.get("name")
    metadata["summary"] = record.get("summary")
    metadata["url"] = record.get("url")
    # give more descriptive name for metadata filtering.
    metadata["location"] = record.get("category")
    metadata["updated_at"] = record.get("updated_at")
    metadata["created_on"] = record.get("created_on")
    return metadata


loader = JSONLoader(
    file_path="./data/documents.json",
    jq_schema=".[]",
    content_key="content",
    metadata_func=_metadata_func,
)

text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=250)
documents = text_splitter.split_documents(loader.load())

if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD:
    es_connection_details = {
        "es_cloud_id": ELASTIC_CLOUD_ID,
        "es_user": ELASTIC_USERNAME,
        "es_password": ELASTIC_PASSWORD,
    }
else:
    es_connection_details = {"es_url": ES_URL}

vecstore = ElasticsearchStore(
    ELASTIC_INDEX_NAME,
    embedding=OpenAIEmbeddings(),
    **es_connection_details,
)
vecstore.add_documents(documents)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/main.py
from rag_self_query import chain

if __name__ == "__main__":
    questions = [
        "What is the nasa sales team?",
        "What is our work from home policy?",
        "Does the company own my personal project?",
        "How does compensation work?",
    ]
    response = chain.invoke(
        {
            "question": questions[0],
            "chat_history": [],
        }
    )
    print(response)

    follow_up_question = "What are their objectives?"

    response = chain.invoke(
        {
            "question": follow_up_question,
            "chat_history": [
                "What is the nasa sales team?",
                "The sales team of NASA consists of Laura Martinez, the Area "
                "Vice-President of North America, and Gary Johnson, the Area "
                "Vice-President of South America. (Sales Organization Overview)",
            ],
        }
    )
    print(response)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/rag_self_query/__init__.py
from rag_self_query.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/rag_self_query/chain.py
import os
from operator import itemgetter
from typing import List, Tuple

from langchain.retrievers import SelfQueryRetriever
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_elasticsearch.vectorstores import ElasticsearchStore

from .prompts import CONDENSE_QUESTION_PROMPT, DOCUMENT_PROMPT, LLM_CONTEXT_PROMPT

ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
ELASTIC_PASSWORD = os.getenv("ELASTIC_PASSWORD")
ES_URL = os.getenv("ES_URL", "http://localhost:9200")
ELASTIC_INDEX_NAME = os.getenv("ELASTIC_INDEX_NAME", "workspace-search-example")

if ELASTIC_CLOUD_ID and ELASTIC_USERNAME and ELASTIC_PASSWORD:
    es_connection_details = {
        "es_cloud_id": ELASTIC_CLOUD_ID,
        "es_user": ELASTIC_USERNAME,
        "es_password": ELASTIC_PASSWORD,
    }
else:
    es_connection_details = {"es_url": ES_URL}

vecstore = ElasticsearchStore(
    ELASTIC_INDEX_NAME,
    embedding=OpenAIEmbeddings(),
    **es_connection_details,
)

document_contents = "The purpose and specifications of a workplace policy."
metadata_field_info = [
    {"name": "name", "type": "string", "description": "Name of the workplace policy."},
    {
        "name": "created_on",
        "type": "date",
        "description": "The date the policy was created in ISO 8601 date format (YYYY-MM-DD).",  # noqa: E501
    },
    {
        "name": "updated_at",
        "type": "date",
        "description": "The date the policy was last updated in ISO 8601 date format (YYYY-MM-DD).",  # noqa: E501
    },
    {
        "name": "location",
        "type": "string",
        "description": "Where the policy text is stored. The only valid values are ['github', 'sharepoint'].",  # noqa: E501
    },
]
llm = ChatOpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
    llm, vecstore, document_contents, metadata_field_info
)


def _combine_documents(docs: List) -> str:
    return "\n\n".join(format_document(doc, prompt=DOCUMENT_PROMPT) for doc in docs)


def _format_chat_history(chat_history: List[Tuple]) -> str:
    return "\n".join(f"Human: {human}\nAssistant: {ai}" for human, ai in chat_history)


class InputType(BaseModel):
    question: str
    chat_history: List[Tuple[str, str]] = Field(default_factory=list)


standalone_question = (
    {
        "question": itemgetter("question"),
        "chat_history": lambda x: _format_chat_history(x["chat_history"]),
    }
    | CONDENSE_QUESTION_PROMPT
    | llm
    | StrOutputParser()
)


def route_question(input):
    if input.get("chat_history"):
        return standalone_question
    else:
        return RunnablePassthrough()


_context = RunnableParallel(
    context=retriever | _combine_documents,
    question=RunnablePassthrough(),
)

chain = (
    standalone_question | _context | LLM_CONTEXT_PROMPT | llm | StrOutputParser()
).with_types(input_type=InputType)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-self-query/rag_self_query/prompts.py
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate

# Used to condense a question and chat history into a single question
condense_question_prompt_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. If there is no chat history, just rephrase the question to be a standalone question.

Chat History:
{chat_history}
Follow Up Input: {question}
"""  # noqa: E501
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(
    condense_question_prompt_template
)

# RAG Prompt to provide the context and question for LLM to answer
# We also ask the LLM to cite the source of the passage it is answering from
llm_context_prompt_template = """
Use the following passages to answer the user's question.
Each passage has a SOURCE which is the title of the document. When answering, cite source name of the passages you are answering from below the answer in a unique bullet point list.

If you don't know the answer, just say that you don't know, don't try to make up an answer.

----
{context}
----
Question: {question}
"""  # noqa: E501

LLM_CONTEXT_PROMPT = ChatPromptTemplate.from_template(llm_context_prompt_template)

# Used to build a context window from passages retrieved
document_prompt_template = """
---
NAME: {name}
PASSAGE:
{page_content}
---
"""

DOCUMENT_PROMPT = PromptTemplate.from_template(document_prompt_template)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis/README.md
# rag-redis

This template performs RAG using Redis (vector database) and OpenAI (LLM) on financial 10k filings docs for Nike.

It relies on the sentence transformer `all-MiniLM-L6-v2` for embedding chunks of the PDF and user questions.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the [OpenAI](https://platform.openai.com) models:

```bash
export OPENAI_API_KEY=<YOUR OPENAI API KEY>
```

Set the following [Redis](https://redis.com/try-free) environment variables:

```bash
export REDIS_HOST=<YOUR REDIS HOST>
export REDIS_PORT=<YOUR REDIS PORT>
export REDIS_USER=<YOUR REDIS USER NAME>
export REDIS_PASSWORD=<YOUR REDIS PASSWORD>
```

## Supported Settings

We use a variety of environment variables to configure this application.

| Environment Variable | Description | Default Value |
|----------------------|-------------|---------------|
| `DEBUG`            | Enable or disable LangChain debugging logs | True |
| `REDIS_HOST`       | Hostname for the Redis server | "localhost" |
| `REDIS_PORT`       | Port for the Redis server | 6379 |
| `REDIS_USER`       | User for the Redis server | "" |
| `REDIS_PASSWORD`   | Password for the Redis server | "" |
| `REDIS_URL`        | Full URL for connecting to Redis | `None`, constructed from user, password, host, and port if not provided |
| `INDEX_NAME`       | Name of the vector index | "rag-redis" |

## Usage

To use this package, you should first have the LangChain CLI and Pydantic installed in a Python virtual environment:

```shell
pip install -U langchain-cli pydantic==1.10.13
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-redis
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-redis
```

And add the following code snippet to your `app/server.py` file:

```python
from rag_redis.chain import chain as rag_redis_chain

add_routes(app, rag_redis_chain, path="/rag-redis")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-redis/playground](http://127.0.0.1:8000/rag-redis/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-redis")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis/ingest.py
import os

from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter

from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL


def ingest_documents():
    """
    Ingest PDF to Redis from the data/ directory that
    contains Edgar 10k filings data for Nike.
    """
    # Load list of pdfs
    company_name = "Nike"
    data_path = "data/"
    doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0]

    print("Parsing 10k filing doc for NIKE", doc)

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500, chunk_overlap=100, add_start_index=True
    )
    loader = UnstructuredFileLoader(doc, mode="single", strategy="fast")
    chunks = loader.load_and_split(text_splitter)

    print("Done preprocessing. Created", len(chunks), "chunks of the original pdf")

    # Create vectorstore
    embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)

    _ = Redis.from_texts(
        # appending this little bit can sometimes help with semantic retrieval
        # especially with multiple companies
        texts=[f"Company: {company_name}. " + chunk.page_content for chunk in chunks],
        metadatas=[chunk.metadata for chunk in chunks],
        embedding=embedder,
        index_name=INDEX_NAME,
        index_schema=INDEX_SCHEMA,
        redis_url=REDIS_URL,
    )


if __name__ == "__main__":
    ingest_documents()
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis/rag_redis.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to RAG App\n", "\n", "Assuming you are already running this server:\n", "```bash\n", "langserve start\n", "```" ] }, { "cell_type": "code", "execution_count": 37, "id": "d774be2a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Nike's revenue in 2023 was $51.2 billion. \n", "\n", "Source: 'data/nke-10k-2023.pdf', Start Index: '146100'\n" ] } ], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_redis = RemoteRunnable(\"http://localhost:8000/rag-redis\")\n", "\n", "print(rag_redis.invoke(\"What was Nike's revenue in 2023?\"))" ] }, { "cell_type": "code", "execution_count": 43, "id": "07ae0005", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "As of May 31, 2023, Nike had approximately 83,700 employees worldwide. This information can be found in the first piece of context provided. (source: data/nke-10k-2023.pdf, start_index: 32532)\n" ] } ], "source": [ "print(rag_redis.invoke(\"How many employees work at Nike?\"))" ] }, { "cell_type": "code", "execution_count": null, "id": "4a6b9f00", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis/rag_redis/chain.py
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

from rag_redis.config import (
    EMBED_MODEL,
    INDEX_NAME,
    INDEX_SCHEMA,
    REDIS_URL,
)


# Make this look better in the docs.
class Question(BaseModel):
    __root__: str


# Init Embeddings
embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)

# Connect to pre-loaded vectorstore
# run the ingest.py script to populate this
vectorstore = Redis.from_existing_index(
    embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL
)

# TODO allow user to change parameters
retriever = vectorstore.as_retriever(search_type="mmr")

# Define our prompt
template = """
Use the following pieces of context from Nike's financial 10k filings
dataset to answer the question. Do not make up an answer if there is no
context provided to help answer it. Include the 'source' and 'start_index'
from the metadata included in the context you used to answer the question

Context:
---------
{context}

---------
Question: {question}
---------

Answer:
"""

prompt = ChatPromptTemplate.from_template(template)

# RAG Chain
model = ChatOpenAI(model="gpt-3.5-turbo-16k")
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
).with_types(input_type=Question)
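For local experimentation, a hedged sketch (not part of the template): once `ingest.py` has populated the Redis index and the Redis/OpenAI environment variables are set, the chain can be invoked directly, mirroring the question used in the template's notebook.

```python
from rag_redis.chain import chain

print(chain.invoke("What was Nike's revenue in 2023?"))
```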
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis/rag_redis/config.py
import os


def get_boolean_env_var(var_name, default_value=False):
    """Retrieve the boolean value of an environment variable.

    Args:
        var_name (str): The name of the environment variable to retrieve.
        default_value (bool): The default value to return if the variable
            is not found.

    Returns:
        bool: The value of the environment variable, interpreted as a boolean.
    """
    true_values = {"true", "1", "t", "y", "yes"}
    false_values = {"false", "0", "f", "n", "no"}

    # Retrieve the environment variable's value
    value = os.getenv(var_name, "").lower()

    # Decide the boolean value based on the content of the string
    if value in true_values:
        return True
    elif value in false_values:
        return False
    else:
        return default_value


# Check for openai API key
if "OPENAI_API_KEY" not in os.environ:
    raise Exception("Must provide an OPENAI_API_KEY as an env var.")


# Whether or not to enable langchain debugging
DEBUG = get_boolean_env_var("DEBUG", False)
# Set DEBUG env var to "true" if you wish to enable LC debugging module
if DEBUG:
    import langchain

    langchain.debug = True


# Embedding model
EMBED_MODEL = os.getenv("EMBED_MODEL", "sentence-transformers/all-MiniLM-L6-v2")

# Redis Connection Information
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))


def format_redis_conn_from_env():
    redis_url = os.getenv("REDIS_URL", None)
    if redis_url:
        return redis_url
    else:
        using_ssl = get_boolean_env_var("REDIS_SSL", False)
        start = "rediss://" if using_ssl else "redis://"

        # if using RBAC
        password = os.getenv("REDIS_PASSWORD", None)
        username = os.getenv("REDIS_USERNAME", "default")
        if password is not None:
            start += f"{username}:{password}@"

        return start + f"{REDIS_HOST}:{REDIS_PORT}"


REDIS_URL = format_redis_conn_from_env()

# Vector Index Configuration
INDEX_NAME = os.getenv("INDEX_NAME", "rag-redis")
current_file_path = os.path.abspath(__file__)
parent_dir = os.path.dirname(current_file_path)
schema_path = os.path.join(parent_dir, "schema.yml")
INDEX_SCHEMA = schema_path
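To make the URL construction above concrete, here is a small, self-contained, hedged illustration of the same logic with hypothetical values (it re-implements the behavior rather than importing `rag_redis.config`, which requires `OPENAI_API_KEY` at import time):

```python
from typing import Optional


def build_redis_url(
    host: str,
    port: int,
    username: str = "default",
    password: Optional[str] = None,
    ssl: bool = False,
) -> str:
    # Same shape as format_redis_conn_from_env: scheme, optional RBAC creds, host:port
    scheme = "rediss://" if ssl else "redis://"
    auth = f"{username}:{password}@" if password else ""
    return f"{scheme}{auth}{host}:{port}"


assert build_redis_url("localhost", 6379) == "redis://localhost:6379"
assert (
    build_redis_url("redis.example.com", 6380, password="s3cret")
    == "redis://default:s3cret@redis.example.com:6380"
)
```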
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis-multi-modal-multi-vector/README.md
# rag-redis-multi-modal-multi-vector

Multi-modal LLMs enable visual assistants that can perform question-answering about images.

This template creates a visual assistant for slide decks, which often contain visuals such as graphs or figures.

It uses GPT-4V to create image summaries for each slide, embeds the summaries, and stores them in Redis.

Given a question, relevant slides are retrieved and passed to GPT-4V for answer synthesis.

![](RAG-architecture.png)

## Input

Supply a slide deck as PDF in the `/docs` directory.

By default, this template has a slide deck about recent earnings from NVIDIA.

Example questions to ask can be:

```
1/ how much can H100 TensorRT improve LLama2 inference performance?
2/ what is the % change in GPU accelerated applications from 2020 to 2023?
```

To create an index of the slide deck, run:

```
poetry install
poetry shell
python ingest.py
```

## Storage

Here is the process the template will use to create an index of the slides (see [blog](https://blog.langchain.dev/multi-modal-rag-template/)):

* Extract the slides as a collection of images
* Use GPT-4V to summarize each image
* Embed the image summaries using text embeddings with a link to the original images
* Retrieve relevant images based on similarity between the image summary and the user input question
* Pass those images to GPT-4V for answer synthesis

### Redis

This template uses [Redis](https://redis.com) to power the [MultiVectorRetriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector), including:

- Redis as the [VectorStore](https://python.langchain.com/docs/integrations/vectorstores/redis) (to store + index image summary embeddings)
- Redis as the [ByteStore](https://python.langchain.com/docs/integrations/stores/redis) (to store images)

Make sure to deploy a Redis instance either in the [cloud](https://redis.com/try-free) (free) or locally with [docker](https://redis.io/docs/install/install-stack/docker/).

This will give you an accessible Redis endpoint that you can use as a URL. If deploying locally, simply use `redis://localhost:6379`.

## LLM

The app will retrieve images based on similarity between the text input and the image summary (text), and pass the images to GPT-4V for answer synthesis.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access OpenAI GPT-4V.

Set the `REDIS_URL` environment variable to access your Redis database.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-redis-multi-modal-multi-vector
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-redis-multi-modal-multi-vector
```

And add the following code to your `server.py` file:

```python
from rag_redis_multi_modal_multi_vector import chain as rag_redis_multi_modal_chain_mv

add_routes(app, rag_redis_multi_modal_chain_mv, path="/rag-redis-multi-modal-multi-vector")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-redis-multi-modal-multi-vector/playground](http://127.0.0.1:8000/rag-redis-multi-modal-multi-vector/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-redis-multi-modal-multi-vector")
```
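Once the app is served, a hedged end-to-end check (using one of the example questions from the Input section above) might look like:

```python
from langserve.client import RemoteRunnable

rag = RemoteRunnable("http://localhost:8000/rag-redis-multi-modal-multi-vector")
print(rag.invoke("How much can H100 TensorRT improve LLama2 inference performance?"))
```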
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis-multi-modal-multi-vector/ingest.py
import base64
import io
import uuid
from io import BytesIO
from pathlib import Path

import pypdfium2 as pdfium
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from langchain_openai.chat_models import ChatOpenAI
from PIL import Image

from rag_redis_multi_modal_multi_vector.utils import ID_KEY, make_mv_retriever


def image_summarize(img_base64, prompt):
    """
    Make image summary

    :param img_base64: Base64 encoded string for image
    :param prompt: Text prompt for summarization
    :return: Image summarization prompt
    """
    chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)

    msg = chat.invoke(
        [
            HumanMessage(
                content=[
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
                    },
                ]
            )
        ]
    )
    return msg.content


def generate_img_summaries(img_base64_list):
    """
    Generate summaries for images

    :param img_base64_list: Base64 encoded images
    :return: List of image summaries and processed images
    """
    # Store image summaries
    image_summaries = []
    processed_images = []

    # Prompt
    prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""

    # Apply summarization to images
    for i, base64_image in enumerate(img_base64_list):
        try:
            image_summaries.append(image_summarize(base64_image, prompt))
            processed_images.append(base64_image)
        except Exception as e:
            print(f"Error with image {i+1}: {e}")

    return image_summaries, processed_images


def get_images_from_pdf(pdf_path):
    """
    Extract images from each page of a PDF document and save as JPEG files.

    :param pdf_path: A string representing the path to the PDF file.
    """
    pdf = pdfium.PdfDocument(pdf_path)
    n_pages = len(pdf)
    pil_images = []
    for page_number in range(n_pages):
        page = pdf.get_page(page_number)
        bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0))
        pil_image = bitmap.to_pil()
        pil_images.append(pil_image)
    return pil_images


def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string

    :param base64_string: Base64 string
    :param size: Image size
    :return: Re-sized Base64 string
    """
    # Decode the Base64 string
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))

    # Resize the image
    resized_img = img.resize(size, Image.LANCZOS)

    # Save the resized image to a bytes buffer
    buffered = io.BytesIO()
    resized_img.save(buffered, format=img.format)

    # Encode the resized image to Base64
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def convert_to_base64(pil_image):
    """
    Convert PIL images to Base64 encoded strings

    :param pil_image: PIL image
    :return: Re-sized Base64 string
    """
    buffered = BytesIO()
    pil_image.save(buffered, format="JPEG")  # You can change the format if needed
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    img_str = resize_base64_image(img_str, size=(960, 540))
    return img_str


def load_images(image_summaries, images):
    """
    Index image summaries in the db.

    :param image_summaries: Image summaries
    :param images: Base64 encoded images
    :return: Retriever
    """
    retriever = make_mv_retriever()

    # Helper function to add documents to the vectorstore and docstore
    def add_documents(retriever, doc_summaries, doc_contents):
        doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
        summary_docs = [
            Document(page_content=s, metadata={ID_KEY: doc_ids[i]})
            for i, s in enumerate(doc_summaries)
        ]
        retriever.vectorstore.add_documents(summary_docs)
        retriever.docstore.mset(list(zip(doc_ids, doc_contents)))

    add_documents(retriever, image_summaries, images)


if __name__ == "__main__":
    doc_path = Path(__file__).parent / "docs/nvda-f3q24-investor-presentation-final.pdf"
    rel_doc_path = doc_path.relative_to(Path.cwd())
    print("Extract slides as images")
    pil_images = get_images_from_pdf(rel_doc_path)

    # Convert to b64
    images_base_64 = [convert_to_base64(i) for i in pil_images]

    # Generate image summaries
    print("Generate image summaries")
    image_summaries, images_base_64_processed = generate_img_summaries(images_base_64)

    # Create documents
    images_base_64_processed_documents = [
        Document(page_content=i) for i in images_base_64_processed
    ]

    # Create retriever and load images
    load_images(image_summaries, images_base_64_processed_documents)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/__init__.py
from rag_redis_multi_modal_multi_vector.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/chain.py
import base64
import io

from langchain.pydantic_v1 import BaseModel
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
from PIL import Image

from rag_redis_multi_modal_multi_vector.utils import make_mv_retriever


def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string.

    :param base64_string: A Base64 encoded string of the image to be resized.
    :param size: A tuple representing the new size (width, height) for the image.
    :return: A Base64 encoded string of the resized image.
    """
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))
    resized_img = img.resize(size, Image.LANCZOS)
    buffered = io.BytesIO()
    resized_img.save(buffered, format=img.format)
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def get_resized_images(docs):
    """
    Resize images from base64-encoded strings.

    :param docs: A list of base64-encoded images to be resized.
    :return: Dict containing a list of resized base64-encoded strings.
    """
    b64_images = []
    for doc in docs:
        if isinstance(doc, Document):
            doc = doc.page_content
        resized_image = resize_base64_image(doc, size=(1280, 720))
        b64_images.append(resized_image)
    return {"images": b64_images}


def img_prompt_func(data_dict, num_images=2):
    """
    GPT-4V prompt for image analysis.

    :param data_dict: A dict with images and a user-provided question.
    :param num_images: Number of images to include in the prompt.
    :return: A list containing message objects for each image and the text prompt.
    """
    messages = []
    if data_dict["context"]["images"]:
        for image in data_dict["context"]["images"][:num_images]:
            messages.append(
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image}"},
                }
            )
    text_message = {
        "type": "text",
        "text": (
            "You are an analyst tasked with answering questions about visual content.\n"
            "You will be given a set of image(s) from a slide deck / presentation.\n"
            "Use this information to answer the user question. \n"
            f"User-provided question: {data_dict['question']}\n\n"
        ),
    }
    messages.append(text_message)
    return [HumanMessage(content=messages)]


def multi_modal_rag_chain():
    """
    Multi-modal RAG chain.

    :return: A chain of functions representing the multi-modal RAG process.
    """
    # Initialize the multi-modal Large Language Model with specific parameters
    model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024)

    # Initialize the retriever
    retriever = make_mv_retriever()

    # Define the RAG pipeline
    return (
        {
            "context": retriever | RunnableLambda(get_resized_images),
            "question": RunnablePassthrough(),
        }
        | RunnableLambda(img_prompt_func)
        | model
        | StrOutputParser()
    )


# Create RAG chain
chain = multi_modal_rag_chain()


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-redis-multi-modal-multi-vector/rag_redis_multi_modal_multi_vector/utils.py
import os

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain_community.storage import RedisStore
from langchain_community.vectorstores import Redis as RedisVectorDB
from langchain_openai.embeddings import OpenAIEmbeddings

ID_KEY = "doc_id"


def get_boolean_env_var(var_name, default_value=False):
    """Retrieve the boolean value of an environment variable.

    Args:
        var_name (str): The name of the environment variable to retrieve.
        default_value (bool): The default value to return if the variable
            is not found.

    Returns:
        bool: The value of the environment variable, interpreted as a boolean.
    """
    true_values = {"true", "1", "t", "y", "yes"}
    false_values = {"false", "0", "f", "n", "no"}

    # Retrieve the environment variable's value
    value = os.getenv(var_name, "").lower()

    # Decide the boolean value based on the content of the string
    if value in true_values:
        return True
    elif value in false_values:
        return False
    else:
        return default_value


# Check for openai API key
if "OPENAI_API_KEY" not in os.environ:
    raise Exception("Must provide an OPENAI_API_KEY as an env var.")


def format_redis_conn_from_env() -> str:
    redis_url = os.getenv("REDIS_URL", None)
    if redis_url:
        return redis_url
    else:
        using_ssl = get_boolean_env_var("REDIS_SSL", False)
        start = "rediss://" if using_ssl else "redis://"

        # if using RBAC
        password = os.getenv("REDIS_PASSWORD", None)
        username = os.getenv("REDIS_USERNAME", "default")
        if password is not None:
            start += f"{username}:{password}@"

        host = os.getenv("REDIS_HOST", "localhost")
        port = int(os.getenv("REDIS_PORT", 6379))
        return start + f"{host}:{port}"


REDIS_URL = format_redis_conn_from_env()

current_file_path = os.path.abspath(__file__)
parent_dir = os.path.dirname(current_file_path)
schema_path = os.path.join(parent_dir, "schema.yml")
INDEX_SCHEMA = schema_path


def make_mv_retriever():
    """Create the multi-vector retriever"""
    # Load Redis
    REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
    vectorstore = RedisVectorDB(
        redis_url=REDIS_URL,
        index_name="image_summaries",
        key_prefix="summary",
        index_schema=INDEX_SCHEMA,
        embedding=OpenAIEmbeddings(),
    )
    store = RedisStore(redis_url=REDIS_URL, namespace="image")

    # Create the multi-vector retriever
    return MultiVectorRetriever(
        vectorstore=vectorstore,
        byte_store=store,
        id_key=ID_KEY,
    )
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone/README.md
# rag-pinecone

This template performs RAG using Pinecone and OpenAI.

## Environment Setup

This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set.

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-pinecone
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-pinecone
```

And add the following code to your `server.py` file:

```python
from rag_pinecone import chain as rag_pinecone_chain

# Be careful: when you create the project, the code shown in the console may render
# the path with a backslash, as path="\rag-pinecone"; the correct route is "/rag-pinecone"
add_routes(app, rag_pinecone_chain, path="/rag-pinecone")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-pinecone/playground](http://127.0.0.1:8000/rag-pinecone/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-pinecone")
```
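The chain assumes the Pinecone index has already been populated. A hedged one-time ingestion sketch, mirroring the commented-out block in the template's `chain.py` (it uses Lilian Weng's agent blog post as sample data and assumes the Pinecone and OpenAI environment variables above are set; `langchain-test` is the template's default index name):

```python
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load the sample page and split it into chunks
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

# Embed and index the chunks in the existing Pinecone index
PineconeVectorStore.from_documents(
    documents=all_splits, embedding=OpenAIEmbeddings(), index_name="langchain-test"
)
```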
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone/rag_pinecone.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_ext, path=\"/rag_pinecone\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_pinecone\")\n", "rag_app_pinecone.invoke(\"How does agent memory work?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone/rag_pinecone/__init__.py
from rag_pinecone.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone/rag_pinecone/chain.py
import os

from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_pinecone import PineconeVectorStore

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# Load
# from langchain_community.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = PineconeVectorStore.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

vectorstore = PineconeVectorStore.from_existing_index(
    PINECONE_INDEX_NAME, OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-rerank/README.md
# rag-pinecone-rerank

This template performs RAG using Pinecone and OpenAI along with [Cohere to perform re-ranking](https://txt.cohere.com/rerank/) on returned documents.

Re-ranking provides a way to rank retrieved documents using specified filters or criteria.

## Environment Setup

This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set.

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

Set the `COHERE_API_KEY` environment variable to access the Cohere ReRank.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-pinecone-rerank
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-pinecone-rerank
```

And add the following code to your `server.py` file:

```python
from rag_pinecone_rerank import chain as rag_pinecone_rerank_chain

add_routes(app, rag_pinecone_rerank_chain, path="/rag-pinecone-rerank")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-pinecone-rerank/playground](http://127.0.0.1:8000/rag-pinecone-rerank/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-pinecone-rerank")
```
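The core of the template is the rerank step, shown here as a hedged standalone sketch (the full Pinecone-backed pipeline lives in the template's `chain.py`; `top_n` is an illustrative parameter choice, not a template default):

```python
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_core.retrievers import BaseRetriever


def with_cohere_rerank(base_retriever: BaseRetriever, top_n: int = 3):
    """Wrap any retriever so its results are re-ordered by Cohere's reranker."""
    return ContextualCompressionRetriever(
        base_compressor=CohereRerank(top_n=top_n),
        base_retriever=base_retriever,
    )
```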
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-rerank/rag_pinecone_rerank.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to template" ] }, { "cell_type": "code", "execution_count": 3, "id": "d774be2a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'The agent memory consists of two components: short-term memory and long-term memory. The short-term memory is used for in-context learning and allows the model to learn from its experiences. The long-term memory enables the agent to retain and recall an infinite amount of information over extended periods by leveraging an external vector store and fast retrieval.'" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app_pinecone = RemoteRunnable(\"http://localhost:8001/rag_pinecone_rerank\")\n", "rag_app_pinecone.invoke(\"How does agent memory work?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-rerank/rag_pinecone_rerank/__init__.py
from rag_pinecone_rerank.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
import os

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_pinecone import PineconeVectorStore

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# # Load
# from langchain_community.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = PineconeVectorStore.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

vectorstore = PineconeVectorStore.from_existing_index(
    PINECONE_INDEX_NAME, OpenAIEmbeddings()
)

# Get k=10 docs
retriever = vectorstore.as_retriever(search_kwargs={"k": 10})

# Re-rank
compressor = CohereRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel(
        {"context": compression_retriever, "question": RunnablePassthrough()}
    )
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-multi-query/README.md
# rag-pinecone-multi-query

This template performs RAG using Pinecone and OpenAI with a multi-query retriever.

It uses an LLM to generate multiple queries from different perspectives based on the user's input query.

For each query, it retrieves a set of relevant documents and takes the unique union across all queries for answer synthesis.

## Environment Setup

This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set.

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first install the LangChain CLI:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this package, do:

```shell
langchain app new my-app --package rag-pinecone-multi-query
```

To add this package to an existing project, run:

```shell
langchain app add rag-pinecone-multi-query
```

And add the following code to your `server.py` file:

```python
from rag_pinecone_multi_query import chain as rag_pinecone_multi_query_chain

add_routes(app, rag_pinecone_multi_query_chain, path="/rag-pinecone-multi-query")
```

(Optional) Now, let's configure LangSmith. LangSmith will help us trace, monitor, and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
You can access the playground at [http://127.0.0.1:8000/rag-pinecone-multi-query/playground](http://127.0.0.1:8000/rag-pinecone-multi-query/playground)

To access the template from code, use:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-pinecone-multi-query")
```
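When running the chain locally, a hedged way to see the alternative queries the multi-query retriever generates is to enable the retriever's standard logger before invoking it (the question is taken from the template's notebook; a populated Pinecone index and the environment variables above are assumed):

```python
import logging

# Surface the generated sub-queries emitted by MultiQueryRetriever
logging.basicConfig()
logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO)

from rag_pinecone_multi_query import chain  # noqa: E402

print(chain.invoke("What are the different types of agent memory?"))
```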
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-multi-query/rag_pinecone_multi_query.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_ext, path=\"/rag_pinecone_multi_query\")\n", "```" ] }, { "cell_type": "code", "execution_count": 8, "id": "d774be2a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'The different types of agent memory mentioned in the context are short-term memory, long-term memory, explicit/declarative memory, and implicit/procedural memory.'" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_pinecone_multi_query\")\n", "rag_app_pinecone.invoke(\"What are the different types of agent memory\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/__init__.py
from rag_pinecone_multi_query.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
import os

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_pinecone import PineconeVectorStore

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# Load
# from langchain_community.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = PineconeVectorStore.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

# Set up index with multi query retriever
vectorstore = PineconeVectorStore.from_existing_index(
    PINECONE_INDEX_NAME, OpenAIEmbeddings()
)
model = ChatOpenAI(temperature=0)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=model
)

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-opensearch/README.md
# rag-opensearch

This template performs RAG using [OpenSearch](https://python.langchain.com/docs/integrations/vectorstores/opensearch).

## Environment Setup

Set the following environment variables.

- `OPENAI_API_KEY` - To access OpenAI Embeddings and Models.

And optionally set the OpenSearch ones if not using defaults:

- `OPENSEARCH_URL` - URL of the hosted OpenSearch instance
- `OPENSEARCH_USERNAME` - Username for the OpenSearch instance
- `OPENSEARCH_PASSWORD` - Password for the OpenSearch instance
- `OPENSEARCH_INDEX_NAME` - Name of the index

To run the default OpenSearch instance in Docker, you can use the command:

```shell
docker run -p 9200:9200 -p 9600:9600 -e "discovery.type=single-node" --name opensearch-node -d opensearchproject/opensearch:latest
```

Note: To load a dummy index named `langchain-test` with dummy documents, run `python dummy_index_setup.py` in the package.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-opensearch
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-opensearch
```

And add the following code to your `server.py` file:

```python
from rag_opensearch import chain as rag_opensearch_chain

add_routes(app, rag_opensearch_chain, path="/rag-opensearch")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)

We can access the playground at [http://127.0.0.1:8000/rag-opensearch/playground](http://127.0.0.1:8000/rag-opensearch/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-opensearch")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-opensearch/dummy_index_setup.py
import os from openai import OpenAI from opensearchpy import OpenSearch OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") OPENSEARCH_URL = os.getenv("OPENSEARCH_URL", "https://localhost:9200") OPENSEARCH_USERNAME = os.getenv("OPENSEARCH_USERNAME", "admin") OPENSEARCH_PASSWORD = os.getenv("OPENSEARCH_PASSWORD", "admin") OPENSEARCH_INDEX_NAME = os.getenv("OPENSEARCH_INDEX_NAME", "langchain-test") with open("dummy_data.txt") as f: docs = [line.strip() for line in f.readlines()] client_oai = OpenAI(api_key=OPENAI_API_KEY) client = OpenSearch( hosts=[OPENSEARCH_URL], http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD), use_ssl=True, verify_certs=False, ) # Define the index settings and mappings index_settings = { "settings": { "index": {"knn": True, "number_of_shards": 1, "number_of_replicas": 0} }, "mappings": { "properties": { "vector_field": { "type": "knn_vector", "dimension": 1536, "method": {"name": "hnsw", "space_type": "l2", "engine": "faiss"}, } } }, } response = client.indices.create(index=OPENSEARCH_INDEX_NAME, body=index_settings) print(response) # Insert docs for each in docs: res = client_oai.embeddings.create(input=each, model="text-embedding-ada-002") document = { "vector_field": res.data[0].embedding, "text": each, } response = client.index(index=OPENSEARCH_INDEX_NAME, body=document, refresh=True) print(response)
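After the setup script above has run, the index can be sanity-checked with a raw k-NN query before wiring it into the chain. A minimal sketch that reuses `client`, `client_oai`, and `OPENSEARCH_INDEX_NAME` from the script above; the query text and `k` value are illustrative assumptions:

```python
# Sketch: raw k-NN query against the freshly populated index
# (run in the same session as the setup script above).
query_text = "image processing logs"  # illustrative query
query_embedding = client_oai.embeddings.create(
    input=query_text, model="text-embedding-ada-002"
).data[0].embedding

knn_query = {
    "size": 3,
    "query": {"knn": {"vector_field": {"vector": query_embedding, "k": 3}}},
}
results = client.search(index=OPENSEARCH_INDEX_NAME, body=knn_query)
for hit in results["hits"]["hits"]:
    print(hit["_score"], hit["_source"]["text"])
```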
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-opensearch/rag_opensearch.ipynb
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Connect to template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_ext, path=\"/rag_opensearch\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-opensearch\")\n", "rag_app.invoke(\"What is the ip address used in the image processing logs\")" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 2 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-opensearch/rag_opensearch/__init__.py
from rag_opensearch.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-opensearch/rag_opensearch/chain.py
import os from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores.opensearch_vector_search import ( OpenSearchVectorSearch, ) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") OPENSEARCH_URL = os.getenv("OPENSEARCH_URL", "https://localhost:9200") OPENSEARCH_USERNAME = os.getenv("OPENSEARCH_USERNAME", "admin") OPENSEARCH_PASSWORD = os.getenv("OPENSEARCH_PASSWORD", "admin") OPENSEARCH_INDEX_NAME = os.getenv("OPENSEARCH_INDEX_NAME", "langchain-test") embedding_function = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) vector_store = OpenSearchVectorSearch( opensearch_url=OPENSEARCH_URL, http_auth=(OPENSEARCH_USERNAME, OPENSEARCH_PASSWORD), index_name=OPENSEARCH_INDEX_NAME, embedding_function=embedding_function, verify_certs=False, ) retriever = vector_store.as_retriever() def format_docs(docs): return "\n\n".join([d.page_content for d in docs]) # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI(openai_api_key=OPENAI_API_KEY) chain = ( RunnableParallel( {"context": retriever | format_docs, "question": RunnablePassthrough()} ) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
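With OpenSearch reachable and the index populated (for example via `dummy_index_setup.py`), the chain can also be invoked directly in Python rather than through LangServe. A minimal sketch; the question is the one used in the template notebook:

```python
# Sketch: invoke the rag-opensearch chain locally.
# Assumes the OpenSearch index has been populated and the env vars above are set.
from rag_opensearch.chain import chain

print(chain.invoke("What is the ip address used in the image processing logs"))
```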
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-ollama-multi-query/README.md
# rag-ollama-multi-query

This template performs RAG using Ollama and OpenAI with a multi-query retriever.

The multi-query retriever is an example of query transformation, generating multiple queries from different perspectives based on the user's input query.

For each query, it retrieves a set of relevant documents and takes the unique union across all queries for answer synthesis.

We use a private, local LLM for the narrow task of query generation to avoid excessive calls to a larger LLM API.

See an example trace for Ollama LLM performing the query expansion [here](https://smith.langchain.com/public/8017d04d-2045-4089-b47f-f2d66393a999/r).

But we use OpenAI for the more challenging task of answer synthesis (full trace example [here](https://smith.langchain.com/public/ec75793b-645b-498d-b855-e8d85e1f6738/r)).

## Environment Setup

To set up the environment, you need to download Ollama. Follow the instructions [here](https://python.langchain.com/docs/integrations/chat/ollama).

You can choose the desired LLM with Ollama.

This template uses `zephyr`, which can be accessed using `ollama pull zephyr`.

There are many other options available [here](https://ollama.ai/library).

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first install the LangChain CLI:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this package, do:

```shell
langchain app new my-app --package rag-ollama-multi-query
```

To add this package to an existing project, run:

```shell
langchain app add rag-ollama-multi-query
```

And add the following code to your `server.py` file:

```python
from rag_ollama_multi_query import chain as rag_ollama_multi_query_chain

add_routes(app, rag_ollama_multi_query_chain, path="/rag-ollama-multi-query")
```

(Optional) Now, let's configure LangSmith. LangSmith will help us trace, monitor, and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

You can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)

You can access the playground at [http://127.0.0.1:8000/rag-ollama-multi-query/playground](http://127.0.0.1:8000/rag-ollama-multi-query/playground)

To access the template from code, use:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-ollama-multi-query")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-ollama-multi-query/rag_ollama_multi_query.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_ext, path=\"/rag_ollama_multi_query\")\n", "```" ] }, { "cell_type": "code", "execution_count": 4, "id": "8d61a866-f91f-41ec-a840-270b0c9c895c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'The various types of agent memory mentioned in the context are:\\n\\n1. Explicit / declarative memory: This refers to memory of facts and events, including episodic memory (events and experiences) and semantic memory (facts and concepts).\\n\\n2. Implicit / procedural memory: This type of memory is unconscious and involves skills and routines that are performed automatically, like riding a bike or typing on a keyboard.\\n\\n3. Short-term memory: This is the in-context learning utilized by the model to learn.\\n\\n4. Long-term memory: This provides the agent with the capability to retain and recall information over extended periods, often by leveraging an external vector store and fast retrieval.\\n\\n5. Sensory memory: This is the earliest stage of memory that retains impressions of sensory information (visual, auditory, etc) after the original stimuli have ended. It includes subcategories like iconic memory (visual), echoic memory (auditory), and haptic memory (touch).'" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app_ollama = RemoteRunnable(\"http://0.0.0.0:8001/rag_ollama_multi_query\")\n", "rag_app_ollama.invoke(\"What are the different types of agent memory?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-ollama-multi-query/rag_ollama_multi_query/__init__.py
from rag_ollama_multi_query.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
from langchain.retrievers.multi_query import MultiQueryRetriever from langchain_community.chat_models import ChatOllama, ChatOpenAI from langchain_community.document_loaders import WebBaseLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate, PromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough from langchain_text_splitters import RecursiveCharacterTextSplitter # Load loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) # Add to vectorDB vectorstore = Chroma.from_documents( documents=all_splits, collection_name="rag-private", embedding=OpenAIEmbeddings(), ) QUERY_PROMPT = PromptTemplate( input_variables=["question"], template="""You are an AI language model assistant. Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}""", ) # Add the LLM downloaded from Ollama ollama_llm = "zephyr" llm = ChatOllama(model=ollama_llm) # Run retriever = MultiQueryRetriever.from_llm( vectorstore.as_retriever(), llm, prompt=QUERY_PROMPT ) # "lines" is the key (attribute name) of the parsed output # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI() chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
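Because the retriever is built before the RAG prompt, it can be exercised on its own to see how many unique documents the generated queries pull back. A minimal sketch that reuses `retriever` from the module above and assumes Ollama is serving `zephyr`; the question is taken from the template notebook:

```python
# Sketch: inspect the multi-query retriever in isolation (reuses `retriever` above).
docs = retriever.invoke("What are the different types of agent memory?")
print(f"{len(docs)} unique documents retrieved across the generated queries")
for doc in docs[:3]:
    print(doc.page_content[:200], "...")
```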
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-mv-local/README.md
# rag-multi-modal-mv-local

Visual search is a familiar application to many with iPhones or Android devices. It allows users to search photos using natural language.

With the release of open source, multi-modal LLMs it's possible to build this kind of application for yourself for your own private photo collection.

This template demonstrates how to perform private visual search and question-answering over a collection of your photos.

It uses an open source multi-modal LLM of your choice to create image summaries for each photo, embeds the summaries, and stores them in Chroma.

Given a question, relevant photos are retrieved and passed to the multi-modal LLM for answer synthesis.

![Diagram illustrating the visual search process with food pictures, captioning, a database, a question input, and the synthesis of an answer using a multi-modal LLM.](https://github.com/langchain-ai/langchain/assets/122662504/cd9b3d82-9b06-4a39-8490-7482466baf43 "Visual Search Process Diagram")

## Input

Supply a set of photos in the `/docs` directory.

By default, this template has a toy collection of 3 food pictures.

The app will look up and summarize photos based upon provided keywords or questions:

```
What kind of ice cream did I have?
```

In practice, a larger corpus of images can be tested.

To create an index of the images, run:

```
poetry install
python ingest.py
```

## Storage

Here is the process the template will use to create an index of the images (see [blog](https://blog.langchain.dev/multi-modal-rag-template/)):

* Given a set of images
* It uses a local multi-modal LLM ([bakllava](https://ollama.ai/library/bakllava)) to summarize each image
* Embeds the image summaries with a link to the original images
* Given a user question, it will retrieve relevant image(s) based on similarity between the image summary and the user input (using Ollama embeddings)
* It will pass those images to bakllava for answer synthesis

By default, this will use [LocalFileStore](https://python.langchain.com/docs/integrations/stores/file_system) to store images and Chroma to store summaries.

## LLM and Embedding Models

We will use [Ollama](https://python.langchain.com/docs/integrations/chat/ollama#multi-modal) for generating image summaries, embeddings, and the final image QA.

Download the latest version of Ollama: https://ollama.ai/

Pull an open source multi-modal LLM: e.g., https://ollama.ai/library/bakllava

Pull an open source embedding model: e.g., https://ollama.ai/library/llama2:7b

```
ollama pull bakllava
ollama pull llama2:7b
```

The app is by default configured for `bakllava`. But you can change this in `chain.py` and `ingest.py` for different downloaded models.

The app will retrieve images based on similarity between the text input and the image summary, and pass the images to `bakllava`.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-multi-modal-mv-local
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-multi-modal-mv-local
```

And add the following code to your `server.py` file:

```python
from rag_multi_modal_mv_local import chain as rag_multi_modal_mv_local_chain

add_routes(app, rag_multi_modal_mv_local_chain, path="/rag-multi-modal-mv-local")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)

We can access the playground at [http://127.0.0.1:8000/rag-multi-modal-mv-local/playground](http://127.0.0.1:8000/rag-multi-modal-mv-local/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-multi-modal-mv-local")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-mv-local/ingest.py
import base64 import io import os import uuid from io import BytesIO from pathlib import Path from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore from langchain_community.chat_models import ChatOllama from langchain_community.embeddings import OllamaEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from PIL import Image def image_summarize(img_base64, prompt): """ Make image summary :param img_base64: Base64 encoded string for image :param prompt: Text prompt for summarizatiomn :return: Image summarization prompt """ chat = ChatOllama(model="bakllava", temperature=0) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": f"data:image/jpeg;base64,{img_base64}", }, ] ) ] ) return msg.content def generate_img_summaries(img_base64_list): """ Generate summaries for images :param img_base64_list: Base64 encoded images :return: List of image summaries and processed images """ # Store image summaries image_summaries = [] processed_images = [] # Prompt prompt = """Give a detailed summary of the image.""" # Apply summarization to images for i, base64_image in enumerate(img_base64_list): try: image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: print(f"Error with image {i+1}: {e}") return image_summaries, processed_images def get_images(img_path): """ Extract images. :param img_path: A string representing the path to the images. """ # Get image URIs pil_images = [ Image.open(os.path.join(img_path, image_name)) for image_name in os.listdir(img_path) if image_name.endswith(".jpg") ] return pil_images def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string :param base64_string: Base64 string :param size: Image size :return: Re-sized Base64 string """ # Decode the Base64 string img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) # Resize the image resized_img = img.resize(size, Image.LANCZOS) # Save the resized image to a bytes buffer buffered = io.BytesIO() resized_img.save(buffered, format=img.format) # Encode the resized image to Base64 return base64.b64encode(buffered.getvalue()).decode("utf-8") def convert_to_base64(pil_image): """ Convert PIL images to Base64 encoded strings :param pil_image: PIL image :return: Re-sized Base64 string """ buffered = BytesIO() pil_image.save(buffered, format="JPEG") # You can change the format if needed img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") # img_str = resize_base64_image(img_str, size=(831,623)) return img_str def create_multi_vector_retriever(vectorstore, image_summaries, images): """ Create retriever that indexes summaries, but returns raw images or texts :param vectorstore: Vectorstore to store embedded image sumamries :param image_summaries: Image summaries :param images: Base64 encoded images :return: Retriever """ # Initialize the storage layer for images store = LocalFileStore( str(Path(__file__).parent / "multi_vector_retriever_metadata") ) id_key = "doc_id" # Create the multi-vector retriever retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key, ) # Helper function to add documents to the vectorstore and docstore def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in doc_contents] 
summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) add_documents(retriever, image_summaries, images) return retriever # Load images doc_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) print("Read images") pil_images = get_images(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries print("Generate image summaries") image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images summaries vectorstore_mvr = Chroma( collection_name="image_summaries", persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), embedding_function=OllamaEmbeddings(model="llama2:7b"), ) # Create documents images_base_64_processed_documents = [ Document(page_content=i) for i in images_base_64_processed ] # Create retriever retriever_multi_vector_img = create_multi_vector_retriever( vectorstore_mvr, image_summaries, images_base_64_processed_documents, )
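Once ingestion finishes, the retriever can be queried to confirm that the image summaries were embedded and that items come back from the docstore. A minimal sketch that reuses `retriever_multi_vector_img` from the script above; the keyword is an illustrative assumption matching the toy food photos:

```python
# Sketch: quick post-ingestion check (reuses the retriever built above).
retrieved = retriever_multi_vector_img.invoke("ice cream")
print(f"Retrieved {len(retrieved)} item(s)")
# Each item should carry a base64-encoded image, as stored in the docstore above.
print(type(retrieved[0]).__name__)
```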
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-mv-local/rag-multi-modal-mv-local.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Run Template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_rag_conv, path=\"/rag-multi-modal-mv-local\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-multi-modal-mv-local\")\n", "rag_app.invoke(\" < keywords here > \")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/__init__.py
from rag_multi_modal_mv_local.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
import base64 import io from pathlib import Path from langchain.pydantic_v1 import BaseModel from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore from langchain_community.chat_models import ChatOllama from langchain_community.embeddings import OllamaEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda, RunnablePassthrough from PIL import Image def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image. """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode("utf-8") def get_resized_images(docs): """ Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings. """ b64_images = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content # Optional: re-size image # resized_image = resize_base64_image(doc, size=(1280, 720)) b64_images.append(doc) return {"images": b64_images} def img_prompt_func(data_dict, num_images=1): """ Ollama prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt. """ messages = [] if data_dict["context"]["images"]: for image in data_dict["context"]["images"][:num_images]: image_message = { "type": "image_url", "image_url": f"data:image/jpeg;base64,{image}", } messages.append(image_message) text_message = { "type": "text", "text": ( "You are a helpful assistant that gives a description of food pictures.\n" "Give a detailed summary of the image.\n" ), } messages.append(text_message) return [HumanMessage(content=messages)] def multi_modal_rag_chain(retriever): """ Multi-modal RAG chain, :param retriever: A function that retrieves the necessary context for the model. :return: A chain of functions representing the multi-modal RAG process. 
""" # Initialize the multi-modal Large Language Model with specific parameters model = ChatOllama(model="bakllava", temperature=0) # Define the RAG pipeline chain = ( { "context": retriever | RunnableLambda(get_resized_images), "question": RunnablePassthrough(), } | RunnableLambda(img_prompt_func) | model | StrOutputParser() ) return chain # Load chroma vectorstore_mvr = Chroma( collection_name="image_summaries", persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), embedding_function=OllamaEmbeddings(model="llama2:7b"), ) # Load file store store = LocalFileStore( str(Path(__file__).parent.parent / "multi_vector_retriever_metadata") ) id_key = "doc_id" # Create the multi-vector retriever retriever = MultiVectorRetriever( vectorstore=vectorstore_mvr, byte_store=store, id_key=id_key, ) # Create RAG chain chain = multi_modal_rag_chain(retriever) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-local/README.md
# rag-multi-modal-local

Visual search is a familiar application to many with iPhones or Android devices. It allows users to search photos using natural language.

With the release of open source, multi-modal LLMs it's possible to build this kind of application for yourself for your own private photo collection.

This template demonstrates how to perform private visual search and question-answering over a collection of your photos.

It uses [`nomic-embed-vision-v1`](https://huggingface.co/nomic-ai/nomic-embed-vision-v1) multi-modal embeddings to embed the images and `Ollama` for question-answering.

Given a question, relevant photos are retrieved and passed to an open source multi-modal LLM of your choice for answer synthesis.

![Diagram illustrating the visual search process with nomic-embed-vision-v1 embeddings and multi-modal LLM for question-answering, featuring example food pictures and a matcha soft serve answer trace.](https://github.com/langchain-ai/langchain/assets/122662504/da543b21-052c-4c43-939e-d4f882a45d75 "Visual Search Process Diagram")

## Input

Supply a set of photos in the `/docs` directory.

By default, this template has a toy collection of 3 food pictures.

Example questions to ask can be:

```
What kind of soft serve did I have?
```

In practice, a larger corpus of images can be tested.

To create an index of the images, run:

```
poetry install
python ingest.py
```

## Storage

This template will use [nomic-embed-vision-v1](https://huggingface.co/nomic-ai/nomic-embed-vision-v1) multi-modal embeddings to embed the images.

The first time you run the app, it will automatically download the multimodal embedding model.

You can choose alternative models in `ingest.py`, such as `OpenCLIPEmbeddings`.

```
from langchain_experimental.open_clip import OpenCLIPEmbeddings

embedding_function = OpenCLIPEmbeddings(
    model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k"
)

vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory=str(re_vectorstore_path),
    embedding_function=embedding_function,
)
```

## LLM

This template will use [Ollama](https://python.langchain.com/docs/integrations/chat/ollama#multi-modal).

Download the latest version of Ollama: https://ollama.ai/

Pull an open source multi-modal LLM: e.g., https://ollama.ai/library/bakllava

```
ollama pull bakllava
```

The app is by default configured for `bakllava`. But you can change this in `chain.py` and `ingest.py` for different downloaded models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-multi-modal-local
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-multi-modal-local
```

And add the following code to your `server.py` file:

```python
from rag_multi_modal_local import chain as rag_multi_modal_local_chain

add_routes(app, rag_multi_modal_local_chain, path="/rag-multi-modal-local")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)

We can access the playground at [http://127.0.0.1:8000/rag-multi-modal-local/playground](http://127.0.0.1:8000/rag-multi-modal-local/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-multi-modal-local")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-local/ingest.py
import os from pathlib import Path from langchain_community.vectorstores import Chroma from langchain_nomic import NomicMultimodalEmbeddings # Load images img_dump_path = Path(__file__).parent / "docs/" rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) image_uris = sorted( [ os.path.join(rel_img_dump_path, image_name) for image_name in os.listdir(rel_img_dump_path) if image_name.endswith(".jpg") ] ) # Index vectorstore = Path(__file__).parent / "chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function print("Loading embedding function") embedding = NomicMultimodalEmbeddings( vision_model="nomic-embed-vision-v1", text_model="nomic-embed-text-v1" ) # Create chroma vectorstore_mmembd = Chroma( collection_name="multi-modal-rag", persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), embedding_function=embedding, ) # Add images print("Embedding images") vectorstore_mmembd.add_images(uris=image_uris)
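After the images have been embedded, the Chroma collection can be queried with plain text to confirm the multi-modal embeddings work end to end. A minimal sketch that reuses `vectorstore_mmembd` from the script above; the query string is an illustrative assumption:

```python
# Sketch: text-to-image similarity check against the collection built above.
results = vectorstore_mmembd.similarity_search("soft serve", k=1)
for doc in results:
    # For images added via `add_images`, the document content is expected to be
    # the base64-encoded image itself, so we only print its length here.
    print(len(doc.page_content))
```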
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-local/rag_multi_modal_local.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Run Template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_rag_conv, path=\"/rag-multi-modal-local\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-multi-modal-local\")\n", "rag_app.invoke(\" < keywords here > \")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-local/rag_multi_modal_local/__init__.py
from rag_multi_modal_local.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
import base64 import io from pathlib import Path from langchain_community.chat_models import ChatOllama from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_nomic import NomicMultimodalEmbeddings from PIL import Image def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image. """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode("utf-8") def get_resized_images(docs): """ Resize images from base64-encoded strings. :param docs: A list of base64-encoded image to be resized. :return: Dict containing a list of resized base64-encoded strings. """ b64_images = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content # Optional: re-size image # resized_image = resize_base64_image(doc, size=(1280, 720)) b64_images.append(doc) return {"images": b64_images} def img_prompt_func(data_dict, num_images=1): """ GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt. """ messages = [] if data_dict["context"]["images"]: for image in data_dict["context"]["images"][:num_images]: image_message = { "type": "image_url", "image_url": f"data:image/jpeg;base64,{image}", } messages.append(image_message) text_message = { "type": "text", "text": ( "You are a helpful assistant that gives a description of food pictures.\n" "Give a detailed summary of the image.\n" "Give reccomendations for similar foods to try.\n" ), } messages.append(text_message) return [HumanMessage(content=messages)] def multi_modal_rag_chain(retriever): """ Multi-modal RAG chain, :param retriever: A function that retrieves the necessary context for the model. :return: A chain of functions representing the multi-modal RAG process. """ # Initialize the multi-modal Large Language Model with specific parameters model = ChatOllama(model="bakllava", temperature=0) # Define the RAG pipeline chain = ( { "context": retriever | RunnableLambda(get_resized_images), "question": RunnablePassthrough(), } | RunnableLambda(img_prompt_func) | model | StrOutputParser() ) return chain # Load chroma vectorstore_mmembd = Chroma( collection_name="multi-modal-rag", persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"), embedding_function=NomicMultimodalEmbeddings( vision_model="nomic-embed-vision-v1", text_model="nomic-embed-text-v1" ), ) # Make retriever retriever_mmembd = vectorstore_mmembd.as_retriever() # Create RAG chain chain = multi_modal_rag_chain(retriever_mmembd) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
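With the index built by `ingest.py` and Ollama serving `bakllava`, the chain can also be invoked locally rather than through LangServe. A minimal sketch; the question is the example from the template README:

```python
# Sketch: run the multi-modal RAG chain locally.
# Assumes `python ingest.py` has been run and Ollama is serving `bakllava`.
from rag_multi_modal_local.chain import chain

print(chain.invoke("What kind of soft serve did I have?"))
```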
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-router/README.md
# RAG with Multiple Indexes (Routing) A QA application that routes between different domain-specific retrievers given a user question. ## Environment Setup This application queries PubMed, ArXiv, Wikipedia, and [Kay AI](https://www.kay.ai) (for SEC filings). You will need to create a free Kay AI account and [get your API key here](https://www.kay.ai). Then set environment variable: ```bash export KAY_API_KEY="<YOUR_API_KEY>" ``` ## Usage To use this package, you should first have the LangChain CLI installed: ```shell pip install -U langchain-cli ``` To create a new LangChain project and install this as the only package, you can do: ```shell langchain app new my-app --package rag-multi-index-router ``` If you want to add this to an existing project, you can just run: ```shell langchain app add rag-multi-index-router ``` And add the following code to your `server.py` file: ```python from rag_multi_index_router import chain as rag_multi_index_router_chain add_routes(app, rag_multi_index_router_chain, path="/rag-multi-index-router") ``` (Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section ```shell export LANGCHAIN_TRACING_V2=true export LANGCHAIN_API_KEY=<your-api-key> export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default" ``` If you are inside this directory, then you can spin up a LangServe instance directly by: ```shell langchain serve ``` This will start the FastAPI app with a server is running locally at [http://localhost:8000](http://localhost:8000) We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) We can access the playground at [http://127.0.0.1:8000/rag-multi-index-router/playground](http://127.0.0.1:8000/rag-multi-index-router/playground) We can access the template from code with: ```python from langserve.client import RemoteRunnable runnable = RemoteRunnable("http://localhost:8000/rag-multi-index-router") ```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-router/rag_multi_index_router/__init__.py
from rag_multi_index_router.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-router/rag_multi_index_router/chain.py
from operator import itemgetter from typing import Literal from langchain.retrievers import ( ArxivRetriever, KayAiRetriever, PubMedRetriever, WikipediaRetriever, ) from langchain.utils.openai_functions import convert_pydantic_to_openai_function from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.output_parsers.openai_functions import ( PydanticAttrOutputFunctionsParser, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.runnables import ( RouterRunnable, RunnableParallel, RunnablePassthrough, ) pubmed = PubMedRetriever(top_k_results=5).with_config(run_name="pubmed") arxiv = ArxivRetriever(top_k_results=5).with_config(run_name="arxiv") sec = KayAiRetriever.create( dataset_id="company", data_types=["10-K"], num_contexts=5 ).with_config(run_name="sec_filings") wiki = WikipediaRetriever(top_k_results=5, doc_content_chars_max=2000).with_config( run_name="wiki" ) llm = ChatOpenAI(model="gpt-3.5-turbo") class Search(BaseModel): """Search for relevant documents by question topic.""" question_resource: Literal[ "medical paper", "scientific paper", "public company finances report", "general" ] = Field( ..., description=( "The type of resource that would best help answer the user's question. " "If none of the types are relevant return 'general'." ), ) retriever_name = { "medical paper": "PubMed", "scientific paper": "ArXiv", "public company finances report": "SEC filings (Kay AI)", "general": "Wikipedia", } classifier = ( llm.bind( functions=[convert_pydantic_to_openai_function(Search)], function_call={"name": "Search"}, ) | PydanticAttrOutputFunctionsParser( pydantic_schema=Search, attr_name="question_resource" ) | retriever_name.get ) retriever_map = { "PubMed": pubmed, "ArXiv": arxiv, "SEC filings (Kay AI)": sec, "Wikipedia": wiki, } router_retriever = RouterRunnable(runnables=retriever_map) def format_docs(docs): return "\n\n".join(f"Source {i}:\n{doc.page_content}" for i, doc in enumerate(docs)) system = """Answer the user question. Use the following sources to help \ answer the question. If you don't know the answer say "I'm not sure, I couldn't \ find information on {{topic}}." Sources: {sources}""" prompt = ChatPromptTemplate.from_messages([("system", system), ("human", "{question}")]) class Question(BaseModel): __root__: str retriever_chain = ( {"input": itemgetter("question"), "key": itemgetter("retriever_choice")} | router_retriever | format_docs ).with_config(run_name="retrieve") answer_chain = ( {"sources": retriever_chain, "question": itemgetter("question")} | prompt | llm | StrOutputParser() ) chain = ( ( RunnableParallel( question=RunnablePassthrough(), retriever_choice=classifier ).with_config(run_name="classify") | RunnablePassthrough.assign(answer=answer_chain).with_config(run_name="answer") ) .with_config(run_name="QA with router") .with_types(input_type=Question) )
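The assembled chain returns a dict with the original `question`, the `retriever_choice` made by the classifier, and the synthesized `answer`. A minimal sketch, assuming `OPENAI_API_KEY` and `KAY_API_KEY` are set; the question is an illustrative example expected to route to one of the scientific indexes:

```python
# Sketch: invoke the routing chain and inspect which index was chosen.
from rag_multi_index_router.chain import chain

result = chain.invoke("What are recent advances in mRNA vaccine delivery?")
print(result["retriever_choice"])  # e.g. PubMed or ArXiv, per the classifier
print(result["answer"])
```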
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-fusion/README.md
# RAG with Multiple Indexes (Fusion) A QA application that queries multiple domain-specific retrievers and selects the most relevant documents from across all retrieved results. ## Environment Setup This application queries PubMed, ArXiv, Wikipedia, and [Kay AI](https://www.kay.ai) (for SEC filings). You will need to create a free Kay AI account and [get your API key here](https://www.kay.ai). Then set environment variable: ```bash export KAY_API_KEY="<YOUR_API_KEY>" ``` ## Usage To use this package, you should first have the LangChain CLI installed: ```shell pip install -U langchain-cli ``` To create a new LangChain project and install this as the only package, you can do: ```shell langchain app new my-app --package rag-multi-index-fusion ``` If you want to add this to an existing project, you can just run: ```shell langchain app add rag-multi-index-fusion ``` And add the following code to your `server.py` file: ```python from rag_multi_index_fusion import chain as rag_multi_index_fusion_chain add_routes(app, rag_multi_index_fusion_chain, path="/rag-multi-index-fusion") ``` (Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section ```shell export LANGCHAIN_TRACING_V2=true export LANGCHAIN_API_KEY=<your-api-key> export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default" ``` If you are inside this directory, then you can spin up a LangServe instance directly by: ```shell langchain serve ``` This will start the FastAPI app with a server is running locally at [http://localhost:8000](http://localhost:8000) We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) We can access the playground at [http://127.0.0.1:8000/rag-multi-index-fusion/playground](http://127.0.0.1:8000/rag-multi-index-fusion/playground) We can access the template from code with: ```python from langserve.client import RemoteRunnable runnable = RemoteRunnable("http://localhost:8000/rag-multi-index-fusion") ```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-fusion/rag_multi_index_fusion/__init__.py
from rag_multi_index_fusion.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py
from operator import itemgetter import numpy as np from langchain.retrievers import ( ArxivRetriever, KayAiRetriever, PubMedRetriever, WikipediaRetriever, ) from langchain.utils.math import cosine_similarity from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import ( RunnableParallel, RunnablePassthrough, ) pubmed = PubMedRetriever(top_k_results=5).with_config(run_name="pubmed") arxiv = ArxivRetriever(top_k_results=5).with_config(run_name="arxiv") sec = KayAiRetriever.create( dataset_id="company", data_types=["10-K"], num_contexts=5 ).with_config(run_name="sec_filings") wiki = WikipediaRetriever(top_k_results=5, doc_content_chars_max=2000).with_config( run_name="wiki" ) embeddings = OpenAIEmbeddings() def fuse_retrieved_docs(input): results_map = input["sources"] query = input["question"] embedded_query = embeddings.embed_query(query) names, docs = zip( *((name, doc) for name, docs in results_map.items() for doc in docs) ) embedded_docs = embeddings.embed_documents([doc.page_content for doc in docs]) similarity = cosine_similarity( [embedded_query], embedded_docs, ) most_similar = np.flip(np.argsort(similarity[0]))[:5] return [ ( names[i], docs[i], ) for i in most_similar ] def format_named_docs(named_docs): return "\n\n".join( f"Source: {source}\n\n{doc.page_content}" for source, doc in named_docs ) system = """Answer the user question. Use the following sources to help \ answer the question. If you don't know the answer say "I'm not sure, I couldn't \ find information on {{topic}}." Sources: {sources}""" prompt = ChatPromptTemplate.from_messages([("system", system), ("human", "{question}")]) retrieve_all = RunnableParallel( {"ArXiv": arxiv, "Wikipedia": wiki, "PubMed": pubmed, "SEC 10-K Forms": sec} ).with_config(run_name="retrieve_all") class Question(BaseModel): __root__: str answer_chain = ( { "question": itemgetter("question"), "sources": lambda x: format_named_docs(x["sources"]), } | prompt | ChatOpenAI(model="gpt-3.5-turbo-1106") | StrOutputParser() ).with_config(run_name="answer") chain = ( ( RunnableParallel( {"question": RunnablePassthrough(), "sources": retrieve_all} ).with_config(run_name="add_sources") | RunnablePassthrough.assign(sources=fuse_retrieved_docs).with_config( run_name="fuse" ) | RunnablePassthrough.assign(answer=answer_chain).with_config( run_name="add_answer" ) ) .with_config(run_name="QA with fused results") .with_types(input_type=Question) )
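The fused chain returns the `question`, the top fused `sources` as `(retriever_name, Document)` pairs, and the final `answer`. A minimal sketch, assuming `OPENAI_API_KEY` and `KAY_API_KEY` are set; the question is an illustrative example:

```python
# Sketch: invoke the fusion chain and inspect where the top documents came from.
from rag_multi_index_fusion.chain import chain

result = chain.invoke("How do transformers handle long context windows?")
for name, doc in result["sources"]:
    print(name, "->", doc.page_content[:100])
print(result["answer"])
```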
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-mongo/README.md
# rag-mongo This template performs RAG using MongoDB and OpenAI. ## Environment Setup You should export two environment variables, one being your MongoDB URI, the other being your OpenAI API KEY. If you do not have a MongoDB URI, see the `Setup Mongo` section at the bottom for instructions on how to do so. ```shell export MONGO_URI=... export OPENAI_API_KEY=... ``` ## Usage To use this package, you should first have the LangChain CLI installed: ```shell pip install -U langchain-cli ``` To create a new LangChain project and install this as the only package, you can do: ```shell langchain app new my-app --package rag-mongo ``` If you want to add this to an existing project, you can just run: ```shell langchain app add rag-mongo ``` And add the following code to your `server.py` file: ```python from rag_mongo import chain as rag_mongo_chain add_routes(app, rag_mongo_chain, path="/rag-mongo") ``` If you want to set up an ingestion pipeline, you can add the following code to your `server.py` file: ```python from rag_mongo import ingest as rag_mongo_ingest add_routes(app, rag_mongo_ingest, path="/rag-mongo-ingest") ``` (Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section ```shell export LANGCHAIN_TRACING_V2=true export LANGCHAIN_API_KEY=<your-api-key> export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default" ``` If you DO NOT already have a Mongo Search Index you want to connect to, see `MongoDB Setup` section below before proceeding. If you DO have a MongoDB Search index you want to connect to, edit the connection details in `rag_mongo/chain.py` If you are inside this directory, then you can spin up a LangServe instance directly by: ```shell langchain serve ``` This will start the FastAPI app with a server is running locally at [http://localhost:8000](http://localhost:8000) We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) We can access the playground at [http://127.0.0.1:8000/rag-mongo/playground](http://127.0.0.1:8000/rag-mongo/playground) We can access the template from code with: ```python from langserve.client import RemoteRunnable runnable = RemoteRunnable("http://localhost:8000/rag-mongo") ``` For additional context, please refer to [this notebook](https://colab.research.google.com/drive/1cr2HBAHyBmwKUerJq2if0JaNhy-hIq7I#scrollTo=TZp7_CBfxTOB). ## MongoDB Setup Use this step if you need to setup your MongoDB account and ingest data. We will first follow the standard MongoDB Atlas setup instructions [here](https://www.mongodb.com/docs/atlas/getting-started/). 1. Create an account (if not already done) 2. Create a new project (if not already done) 3. Locate your MongoDB URI. This can be done by going to the deployment overview page and connecting to you database ![Screenshot highlighting the 'Connect' button in MongoDB Atlas.](_images/connect.png "MongoDB Atlas Connect Button") We then look at the drivers available ![Screenshot showing the MongoDB Atlas drivers section for connecting to the database.](_images/driver.png "MongoDB Atlas Drivers Section") Among which we will see our URI listed ![Screenshot displaying an example of a MongoDB URI in the connection instructions.](_images/uri.png "MongoDB URI Example") Let's then set that as an environment variable locally: ```shell export MONGO_URI=... ``` 4. 
Let's also set an environment variable for OpenAI (which we will use as an LLM) ```shell export OPENAI_API_KEY=... ``` 5. Let's now ingest some data! We can do that by moving into this directory and running the code in `ingest.py`, eg: ```shell python ingest.py ``` Note that you can (and should!) change this to ingest data of your choice 6. We now need to set up a vector index on our data. We can first connect to the cluster where our database lives ![Screenshot of the MongoDB Atlas interface showing the cluster overview with a 'Connect' button.](_images/cluster.png "MongoDB Atlas Cluster Overview") We can then navigate to where all our collections are listed ![Screenshot of the MongoDB Atlas interface showing the collections overview within a database.](_images/collections.png "MongoDB Atlas Collections Overview") We can then find the collection we want and look at the search indexes for that collection ![Screenshot showing the search indexes section in MongoDB Atlas for a specific collection.](_images/search-indexes.png "MongoDB Atlas Search Indexes") That should likely be empty, and we want to create a new one: ![Screenshot highlighting the 'Create Index' button in MongoDB Atlas.](_images/create.png "MongoDB Atlas Create Index Button") We will use the JSON editor to create it ![Screenshot showing the JSON Editor option for creating a search index in MongoDB Atlas.](_images/json_editor.png "MongoDB Atlas JSON Editor Option") And we will paste the following JSON in: ```text { "mappings": { "dynamic": true, "fields": { "embedding": { "dimensions": 1536, "similarity": "cosine", "type": "knnVector" } } } } ``` ![Screenshot of the JSON configuration for a search index in MongoDB Atlas.](_images/json.png "MongoDB Atlas Search Index JSON Configuration") From there, hit "Next" and then "Create Search Index". It will take a little bit but you should then have an index over your data!
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-mongo/ingest.py
import os from langchain_community.document_loaders import PyPDFLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_text_splitters import RecursiveCharacterTextSplitter from pymongo import MongoClient MONGO_URI = os.environ["MONGO_URI"] # Note that if you change this, you also need to change it in `rag_mongo/chain.py` DB_NAME = "langchain-test-2" COLLECTION_NAME = "test" ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" EMBEDDING_FIELD_NAME = "embedding" client = MongoClient(MONGO_URI) db = client[DB_NAME] MONGODB_COLLECTION = db[COLLECTION_NAME] if __name__ == "__main__": # Load docs loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf") data = loader.load() # Split docs text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) docs = text_splitter.split_documents(data) # Insert the documents in MongoDB Atlas Vector Search _ = MongoDBAtlasVectorSearch.from_documents( documents=docs, embedding=OpenAIEmbeddings(disallowed_special=()), collection=MONGODB_COLLECTION, index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, )
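After ingestion (and once the Atlas vector index described in the README exists), the collection can be checked with a direct similarity search. A minimal sketch reusing the connection constants defined above; the query is an illustrative assumption about the ingested GPT-4 technical report:

```python
# Sketch: verify the ingested documents with a direct vector search.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MongoDBAtlasVectorSearch

vectorstore = MongoDBAtlasVectorSearch.from_connection_string(
    MONGO_URI,
    DB_NAME + "." + COLLECTION_NAME,
    OpenAIEmbeddings(disallowed_special=()),
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)
for doc in vectorstore.similarity_search("How was GPT-4 evaluated?", k=2):
    print(doc.page_content[:120])
```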
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-mongo/rag_mongo.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Connect to template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_ext, path=\"/rag_mongo\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app_pinecone = RemoteRunnable(\"http://0.0.0.0:8001/rag_mongo\")\n", "rag_app_pinecone.invoke(\"How does agent memory work?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-mongo/rag_mongo/__init__.py
from rag_mongo.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-mongo/rag_mongo/chain.py
import os from langchain_community.chat_models import ChatOpenAI from langchain_community.document_loaders import PyPDFLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import ( RunnableLambda, RunnableParallel, RunnablePassthrough, ) from langchain_text_splitters import RecursiveCharacterTextSplitter from pymongo import MongoClient # Set DB if os.environ.get("MONGO_URI", None) is None: raise Exception("Missing `MONGO_URI` environment variable.") MONGO_URI = os.environ["MONGO_URI"] DB_NAME = "langchain-test-2" COLLECTION_NAME = "test" ATLAS_VECTOR_SEARCH_INDEX_NAME = "default" client = MongoClient(MONGO_URI) db = client[DB_NAME] MONGODB_COLLECTION = db[COLLECTION_NAME] # Read from MongoDB Atlas Vector Search vectorstore = MongoDBAtlasVectorSearch.from_connection_string( MONGO_URI, DB_NAME + "." + COLLECTION_NAME, OpenAIEmbeddings(disallowed_special=()), index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, ) retriever = vectorstore.as_retriever() # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI() chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question) def _ingest(url: str) -> dict: loader = PyPDFLoader(url) data = loader.load() # Split docs text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) docs = text_splitter.split_documents(data) # Insert the documents in MongoDB Atlas Vector Search _ = MongoDBAtlasVectorSearch.from_documents( documents=docs, embedding=OpenAIEmbeddings(disallowed_special=()), collection=MONGODB_COLLECTION, index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME, ) return {} ingest = RunnableLambda(_ingest)
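Both runnables exported by this module can be exercised locally without LangServe. A small smoke-test sketch, assuming the environment variables are set and the Atlas search index exists (the PDF URL and question are just examples):

```python
# Local smoke test for the two runnables defined in rag_mongo/chain.py.
from rag_mongo.chain import chain, ingest

# Populate the collection from a PDF URL (this is what /rag-mongo-ingest exposes).
ingest.invoke("https://arxiv.org/pdf/2303.08774.pdf")

# Then ask a question against the indexed content.
print(chain.invoke("What does the report say about model evaluation?"))
```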
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-momento-vector-index/README.md
# rag-momento-vector-index

This template performs RAG using Momento Vector Index (MVI) and OpenAI.

> MVI: the most productive, easiest to use, serverless vector index for your data. To get started with MVI, simply sign up for an account. There's no need to handle infrastructure, manage servers, or be concerned about scaling. MVI is a service that scales automatically to meet your needs.

Combine with other Momento services such as Momento Cache to cache prompts and as a session store, or Momento Topics as a pub/sub system to broadcast events to your application.

To sign up and access MVI, visit the [Momento Console](https://console.gomomento.com/).

## Environment Setup

This template uses Momento Vector Index as a vectorstore and requires that `MOMENTO_API_KEY` and `MOMENTO_INDEX_NAME` are set. Go to the [console](https://console.gomomento.com/) to get an API key.

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-momento-vector-index
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-momento-vector-index
```

And add the following code to your `server.py` file:

```python
from rag_momento_vector_index import chain as rag_momento_vector_index_chain

add_routes(app, rag_momento_vector_index_chain, path="/rag-momento-vector-index")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-momento-vector-index/playground](http://127.0.0.1:8000/rag-momento-vector-index/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-momento-vector-index")
```

## Indexing Data

We have included a sample module to index data, available at `rag_momento_vector_index/ingest.py`. You will see a commented-out line in `chain.py` that invokes it; uncomment that line to use it.
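If you prefer to seed the index without editing `chain.py`, you can call the ingest module directly. A minimal sketch, assuming `MOMENTO_API_KEY`, `MOMENTO_INDEX_NAME`, and `OPENAI_API_KEY` are set (the index name below is the template's default and may differ in your setup):

```python
from rag_momento_vector_index import ingest

# load() takes the *name* of the API-key environment variable and the index name,
# mirroring the commented-out call in rag_momento_vector_index/chain.py.
ingest.load("MOMENTO_API_KEY", "langchain-test")
```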
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-momento-vector-index/rag_momento_vector_index/__init__.py
from rag_momento_vector_index.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
import os from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MomentoVectorIndex from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough from momento import ( CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations, ) API_KEY_ENV_VAR_NAME = "MOMENTO_API_KEY" if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None: raise Exception(f"Missing `{API_KEY_ENV_VAR_NAME}` environment variable.") MOMENTO_INDEX_NAME = os.environ.get("MOMENTO_INDEX_NAME", "langchain-test") ### Sample Ingest Code - this populates the vector index with data ### Run this on the first time to seed with data # from rag_momento_vector_index import ingest # ingest.load(API_KEY_ENV_VAR_NAME, MOMENTO_INDEX_NAME) vectorstore = MomentoVectorIndex( embedding=OpenAIEmbeddings(), client=PreviewVectorIndexClient( configuration=VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( API_KEY_ENV_VAR_NAME ), ), index_name=MOMENTO_INDEX_NAME, ) retriever = vectorstore.as_retriever() # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI() chain = ( {"context": retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
### Ingest code - you may need to run this the first time import os from langchain_community.document_loaders import WebBaseLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MomentoVectorIndex from langchain_text_splitters import RecursiveCharacterTextSplitter from momento import ( CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations, ) def load(API_KEY_ENV_VAR_NAME: str, index_name: str) -> None: if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None: raise Exception(f"Missing `{API_KEY_ENV_VAR_NAME}` environment variable.") # Load loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) # Add to vectorDB MomentoVectorIndex.from_documents( all_splits, embedding=OpenAIEmbeddings(), client=PreviewVectorIndexClient( configuration=VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( API_KEY_ENV_VAR_NAME ), ), index_name=index_name, )
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-milvus/README.md
# rag-milvus

This template performs RAG using Milvus and OpenAI.

## Environment Setup

Start a Milvus server instance and get its host IP and port.

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-milvus
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-milvus
```

And add the following code to your `server.py` file:

```python
from rag_milvus import chain as rag_milvus_chain

add_routes(app, rag_milvus_chain, path="/rag-milvus")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-milvus/playground](http://127.0.0.1:8000/rag-milvus/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-milvus")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-milvus/rag_milvus/__init__.py
from rag_milvus.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-milvus/rag_milvus/chain.py
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_milvus.vectorstores import Milvus
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Example for document loading (from url), splitting, and creating vectorstore

# Setting the URI as a local file, e.g. `./milvus.db`, is the most convenient method,
# as it automatically utilizes Milvus Lite to store all data in this file.
#
# If you have a large amount of data, such as more than a million docs,
# we recommend setting up a more performant Milvus server on Docker or Kubernetes.
# (https://milvus.io/docs/quickstart.md)
# When using this setup, please use the server URI,
# e.g. `http://localhost:19530`, as your URI.
URI = "./milvus.db"

"""
# Load
from langchain_community.document_loaders import WebBaseLoader

loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()

# Split
from langchain_text_splitters import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

# Add to vectorDB
vectorstore = Milvus.from_documents(documents=all_splits,
                                    collection_name="rag_milvus",
                                    embedding=OpenAIEmbeddings(),
                                    drop_old=True,
                                    connection_args={"uri": URI},
                                    )
retriever = vectorstore.as_retriever()
"""

# Embed a single document as a test
vectorstore = Milvus.from_texts(
    ["harrison worked at kensho"],
    collection_name="rag_milvus",
    embedding=OpenAIEmbeddings(),
    drop_old=True,
    connection_args={"uri": URI},
)
retriever = vectorstore.as_retriever()

# RAG prompt
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# LLM
model = ChatOpenAI()

# RAG chain
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
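With only the single test sentence embedded above, the whole pipeline can be smoke-tested locally before swapping in real documents. A quick check might look like this (assumes `OPENAI_API_KEY` is set and uses the Milvus Lite file configured by `URI`):

```python
# Quick local check against the single test document embedded above.
from rag_milvus.chain import chain

if __name__ == "__main__":
    # The answer should mention "kensho", since that is the only indexed text.
    print(chain.invoke("Where did Harrison work?"))
```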
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-matching-engine/README.md
# rag-matching-engine

This template performs RAG using Google Cloud Platform's Vertex AI with Matching Engine.

It will utilize a previously created index to retrieve relevant documents or contexts based on user-provided questions.

## Environment Setup

An index should be created before running the code. The process to create this index can be found [here](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb).

The following environment variables for Vertex AI should be set:

```
PROJECT_ID
ME_REGION
GCS_BUCKET
ME_INDEX_ID
ME_ENDPOINT_ID
```

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-matching-engine
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-matching-engine
```

And add the following code to your `server.py` file:

```python
from rag_matching_engine import chain as rag_matching_engine_chain

add_routes(app, rag_matching_engine_chain, path="/rag-matching-engine")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-matching-engine/playground](http://127.0.0.1:8000/rag-matching-engine/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-matching-engine")
```

For more details on how to connect to the template, refer to the Jupyter notebook `rag_matching_engine`.
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-matching-engine/rag_matching_engine/__init__.py
from rag_matching_engine.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-matching-engine/rag_matching_engine/chain.py
import os

from langchain_community.embeddings import VertexAIEmbeddings
from langchain_community.llms import VertexAI
from langchain_community.vectorstores import MatchingEngine
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

# You need to create the index first, for example, as described here:
# https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/use-cases/document-qa/question_answering_documents_langchain_matching_engine.ipynb

expected_variables = [
    "project_id",
    "me_region",
    "gcs_bucket",
    "me_index_id",
    "me_endpoint_id",
]
variables = []
for variable_name in expected_variables:
    variable = os.environ.get(variable_name.upper())
    if not variable:
        raise Exception(f"Missing `{variable_name}` environment variable.")
    variables.append(variable)

project_id, me_region, gcs_bucket, me_index_id, me_endpoint_id = variables


vectorstore = MatchingEngine.from_components(
    project_id=project_id,
    region=me_region,
    gcs_bucket_name=gcs_bucket,
    embedding=VertexAIEmbeddings(),
    index_id=me_index_id,
    endpoint_id=me_endpoint_id,
)

model = VertexAI()

template = (
    "SYSTEM: You are an intelligent assistant helping the users with their questions "
    "on research papers.\n\n"
    "Question: {question}\n\n"
    "Strictly Use ONLY the following pieces of context to answer the question at the "
    "end. Think step-by-step and then answer.\n\n"
    "Do not try to make up an answer:\n"
    "- If the answer to the question cannot be determined from the context alone, "
    'say \n"I cannot determine the answer to that."\n'
    '- If the context is empty, just say "I do not know the answer to that."\n\n'
    "=============\n{context}\n=============\n\n"
    "Question: {question}\nHelpful Answer: "
)

prompt = PromptTemplate.from_template(template)

retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={
        "k": 10,
        "search_distance": 0.6,
    },
)

chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
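As a rough usage sketch (assuming the five environment variables above are set and the Matching Engine index has been populated with documents), you can inspect what the retriever returns before the LLM sees it, and then run the full chain. The question text is only a placeholder:

```python
# Illustrative smoke test; the indexed corpus and question are placeholders.
from rag_matching_engine.chain import chain, retriever

if __name__ == "__main__":
    question = "What architecture does the paper propose?"

    # Inspect the raw Matching Engine hits first.
    for doc in retriever.get_relevant_documents(question):
        print(doc.metadata, doc.page_content[:120])

    # Then run retrieval + prompting + Vertex AI generation end to end.
    print(chain.invoke(question))
```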
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-lantern/README.md
# rag_lantern

This template performs RAG with Lantern.

[Lantern](https://lantern.dev) is an open-source vector database built on top of [PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL). It enables vector search and embedding generation inside your database.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key.

To find your `LANTERN_URL` and `LANTERN_SERVICE_KEY`, head to your Lantern project's [API settings](https://lantern.dev/dashboard/project/_/settings/api).

- `LANTERN_URL` corresponds to the Project URL
- `LANTERN_SERVICE_KEY` corresponds to the `service_role` API key

```shell
export LANTERN_URL=
export LANTERN_SERVICE_KEY=
export OPENAI_API_KEY=
```

## Setup Lantern Database

Use these steps to set up your Lantern database if you haven't already.

1. Head to [https://lantern.dev](https://lantern.dev) to create your Lantern database.
2. In your favorite SQL client, jump to the SQL editor and run the following script to set up your database as a vector store:

```sql
-- Create a table to store your documents
create table documents (
  id uuid primary key,
  content text, -- corresponds to Document.pageContent
  metadata jsonb, -- corresponds to Document.metadata
  embedding REAL[1536] -- 1536 works for OpenAI embeddings, change as needed
);

-- Create a function to search for documents
create function match_documents (
  query_embedding REAL[1536],
  filter jsonb default '{}'
) returns table (
  id uuid,
  content text,
  metadata jsonb,
  similarity float
)
language plpgsql
as $$
#variable_conflict use_column
begin
  return query
  select
    id,
    content,
    metadata,
    1 - (documents.embedding <=> query_embedding) as similarity
  from documents
  where metadata @> filter
  order by documents.embedding <=> query_embedding;
end;
$$;
```

## Setup Environment Variables

Since we are using [`Lantern`](https://python.langchain.com/docs/integrations/vectorstores/lantern) and [`OpenAIEmbeddings`](https://python.langchain.com/docs/integrations/text_embedding/openai), we need to load their API keys.

## Usage

First, install the LangChain CLI:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-lantern
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-lantern
```

And add the following code to your `server.py` file:

```python
from rag_lantern.chain import chain as rag_lantern_chain

add_routes(app, rag_lantern_chain, path="/rag-lantern")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-lantern/playground](http://127.0.0.1:8000/rag-lantern/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-lantern")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-lantern/rag_lantern/chain.py
from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Lantern from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432" COLLECTION_NAME = "documents" DB_NAME = "postgres" embeddings = OpenAIEmbeddings() vectorstore = Lantern( collection_name=COLLECTION_NAME, connection_string=CONNECTION_STRING, embedding_function=embeddings, ) retriever = vectorstore.as_retriever() template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI() chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
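Note that this module only reads from the `documents` collection; nothing here ingests data. Below is a minimal, hypothetical ingestion sketch that reuses the `vectorstore` configured above (the source URL and splitter parameters are assumptions for illustration, not part of this template):

```python
# Hypothetical ingestion sketch for the Lantern "documents" collection defined above.
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
docs = RecursiveCharacterTextSplitter(
    chunk_size=500, chunk_overlap=0
).split_documents(loader.load())

# add_documents() embeds each chunk with the configured OpenAIEmbeddings
# and inserts it into the collection the retriever reads from.
vectorstore.add_documents(docs)
```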
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-lancedb/README.md
# rag-lancedb

This template performs RAG using LanceDB and OpenAI.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-lancedb
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-lancedb
```

And add the following code to your `server.py` file:

```python
from rag_lancedb import chain as rag_lancedb_chain

add_routes(app, rag_lancedb_chain, path="/rag-lancedb")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-lancedb/playground](http://127.0.0.1:8000/rag-lancedb/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-lancedb")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-lancedb/rag_lancedb/__init__.py
from rag_lancedb.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-lancedb/rag_lancedb/chain.py
from langchain_community.vectorstores import LanceDB
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Example for document loading (from url), splitting, and creating vectorstore

"""
# Load
from langchain_community.document_loaders import WebBaseLoader

loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()

# Split
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

# Add to vectorDB
vectorstore = LanceDB.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
"""

# Embed a single document as a test
vectorstore = LanceDB.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

# RAG prompt
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# LLM
model = ChatOpenAI()

# RAG chain
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-jaguardb/README.md
# rag-jaguardb

This template performs RAG using JaguarDB and OpenAI.

## Environment Setup

You should export two environment variables, one being your Jaguar API key, the other being your OpenAI API key. If you do not have JaguarDB set up, see the `JaguarDB Setup` section at the bottom for instructions on how to do so.

```shell
export JAGUAR_API_KEY=...
export OPENAI_API_KEY=...
```

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-jaguardb
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-jaguardb
```

And add the following code to your `server.py` file:

```python
from rag_jaguardb import chain as rag_jaguardb_chain

add_routes(app, rag_jaguardb_chain, path="/rag-jaguardb")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-jaguardb/playground](http://127.0.0.1:8000/rag-jaguardb/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-jaguardb")
```

## JaguarDB Setup

To utilize JaguarDB, you can use docker pull and docker run commands to quickly set up JaguarDB.

```shell
docker pull jaguardb/jaguardb
docker run -d -p 8888:8888 --name jaguardb jaguardb/jaguardb
```

To launch the JaguarDB client terminal to interact with the JaguarDB server:

```shell
docker exec -it jaguardb /home/jaguar/jaguar/bin/jag
```

Another option is to download an already-built binary package of JaguarDB for Linux [here](http://www.jaguardb.com/download.html) and deploy the database on a single node or in a cluster of nodes.
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-jaguardb/rag_jaguardb.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Run Template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, rag_jaguardb_chain, path=\"/rag-jaguardb\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-jaguardb\")\n", "rag_app.invoke(\"hello!\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-jaguardb/rag_jaguardb/__init__.py
from rag_jaguardb.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-jaguardb/rag_jaguardb/chain.py
import os from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores.jaguar import Jaguar from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import ( RunnableParallel, RunnablePassthrough, ) if os.environ.get("JAGUAR_API_KEY", None) is None: raise Exception("Missing `JAGUAR_API_KEY` environment variable.") JAGUAR_API_KEY = os.environ["JAGUAR_API_KEY"] url = "http://192.168.3.88:8080/fwww/" pod = "vdb" store = "langchain_test_store" vector_index = "v" vector_type = "cosine_fraction_float" vector_dimension = 1536 embeddings = OpenAIEmbeddings() vectorstore = Jaguar( pod, store, vector_index, vector_type, vector_dimension, url, embeddings ) retriever = vectorstore.as_retriever() vectorstore.login() """ Create vector store on the JaguarDB database server. This should be done only once. """ metadata = "category char(16)" text_size = 4096 vectorstore.create(metadata, text_size) # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
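As written, the Jaguar store is created but left empty, so the retriever has nothing to return. Below is a small, hypothetical sketch of seeding it and running the chain once (the sample texts and metadata values are placeholders; it assumes the JaguarDB server at the configured URL is reachable and `JAGUAR_API_KEY` and `OPENAI_API_KEY` are set):

```python
# Hypothetical seeding step: the Jaguar store defined above starts out empty.
from rag_jaguardb.chain import chain, vectorstore

vectorstore.add_texts(
    texts=[
        "JaguarDB is a distributed vector database.",
        "LangChain templates expose runnables over LangServe.",
    ],
    metadatas=[{"category": "database"}, {"category": "framework"}],
)

# Afterwards the retriever (and therefore the chain) has context to work with.
print(chain.invoke("What is JaguarDB?"))
```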
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gpt-crawler/README.md
# rag-gpt-crawler

GPT-crawler will crawl websites to produce files for use in custom GPTs or other apps (RAG).

This template uses [gpt-crawler](https://github.com/BuilderIO/gpt-crawler) to build a RAG app.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Crawling

Run GPT-crawler to extract content from a set of URLs, using the config file in the GPT-crawler repo.

Here is an example config for the LangChain use-case docs:

```
export const config: Config = {
  url: "https://python.langchain.com/docs/use_cases/",
  match: "https://python.langchain.com/docs/use_cases/**",
  selector: ".docMainContainer_gTbr",
  maxPagesToCrawl: 10,
  outputFileName: "output.json",
};
```

Then, run this as described in the [gpt-crawler](https://github.com/BuilderIO/gpt-crawler) README:

```
npm start
```

And copy the `output.json` file into the folder containing this README.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-gpt-crawler
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-gpt-crawler
```

And add the following code to your `server.py` file:

```python
from rag_gpt_crawler import chain as rag_gpt_crawler

add_routes(app, rag_gpt_crawler, path="/rag-gpt-crawler")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-gpt-crawler/playground](http://127.0.0.1:8000/rag-gpt-crawler/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-gpt-crawler")
```
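For reference, `rag_gpt_crawler/chain.py` reads the `title`, `url`, and `html` keys from each record in `output.json`. An illustrative record (all values are placeholders) looks roughly like this, expressed as a Python literal:

```python
# Illustrative shape of one record in output.json; chain.py uses "html" as the
# document body and "title"/"url" as metadata.
sample_record = {
    "title": "Summarization | LangChain",
    "url": "https://python.langchain.com/docs/use_cases/summarization",
    "html": "...page content captured by gpt-crawler...",
}
```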
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gpt-crawler/rag_gpt_crawler.ipynb
{ "cells": [ { "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Run Template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_rag_conv, path=\"/rag-gpt-crawler\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-gpt-crawler\")\n", "rag_app.invoke(\"How does summarization work?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gpt-crawler/rag_gpt_crawler/__init__.py
from rag_gpt_crawler.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
import json from pathlib import Path from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough from langchain_text_splitters import RecursiveCharacterTextSplitter # Load output from gpt crawler path_to_gptcrawler = Path(__file__).parent.parent / "output.json" data = json.loads(Path(path_to_gptcrawler).read_text()) docs = [ Document( page_content=dict_["html"], metadata={"title": dict_["title"], "url": dict_["url"]}, ) for dict_ in data ] # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) all_splits = text_splitter.split_documents(docs) # Add to vectorDB vectorstore = Chroma.from_documents( documents=all_splits, collection_name="rag-gpt-builder", embedding=OpenAIEmbeddings(), ) retriever = vectorstore.as_retriever() # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # LLM model = ChatOpenAI() # RAG chain chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-vertexai-search/README.md
# rag-google-cloud-vertexai-search This template is an application that utilizes Google Vertex AI Search, a machine learning powered search service, and PaLM 2 for Chat (chat-bison). The application uses a Retrieval chain to answer questions based on your documents. For more context on building RAG applications with Vertex AI Search, check [here](https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction). ## Environment Setup Before using this template, please ensure that you are authenticated with Vertex AI Search. See the authentication guide: [here](https://cloud.google.com/generative-ai-app-builder/docs/authentication). You will also need to create: - A search application [here](https://cloud.google.com/generative-ai-app-builder/docs/create-engine-es) - A data store [here](https://cloud.google.com/generative-ai-app-builder/docs/create-data-store-es) A suitable dataset to test this template with is the Alphabet Earnings Reports, which you can find [here](https://abc.xyz/investor/). The data is also available at `gs://cloud-samples-data/gen-app-builder/search/alphabet-investor-pdfs`. Set the following environment variables: * `GOOGLE_CLOUD_PROJECT_ID` - Your Google Cloud project ID. * `DATA_STORE_ID` - The ID of the data store in Vertex AI Search, which is a 36-character alphanumeric value found on the data store details page. * `MODEL_TYPE` - The model type for Vertex AI Search. ## Usage To use this package, you should first have the LangChain CLI installed: ```shell pip install -U langchain-cli ``` To create a new LangChain project and install this as the only package, you can do: ```shell langchain app new my-app --package rag-google-cloud-vertexai-search ``` If you want to add this to an existing project, you can just run: ```shell langchain app add rag-google-cloud-vertexai-search ``` And add the following code to your `server.py` file: ```python from rag_google_cloud_vertexai_search.chain import chain as rag_google_cloud_vertexai_search_chain add_routes(app, rag_google_cloud_vertexai_search_chain, path="/rag-google-cloud-vertexai-search") ``` (Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section ```shell export LANGCHAIN_TRACING_V2=true export LANGCHAIN_API_KEY=<your-api-key> export LANGCHAIN_PROJECT=<your-project> # if not specified, defaults to "default" ``` If you are inside this directory, then you can spin up a LangServe instance directly by: ```shell langchain serve ``` This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000) We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) We can access the playground at [http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground](http://127.0.0.1:8000/rag-google-cloud-vertexai-search/playground) We can access the template from code with: ```python from langserve.client import RemoteRunnable runnable = RemoteRunnable("http://localhost:8000/rag-google-cloud-vertexai-search") ```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-vertexai-search/main.py
from rag_google_cloud_vertexai_search.chain import chain if __name__ == "__main__": query = "Who is the CEO of Google Cloud?" print(chain.invoke(query))
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/__init__.py
from rag_google_cloud_vertexai_search.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py
import os from langchain.retrievers import GoogleVertexAISearchRetriever from langchain_community.chat_models import ChatVertexAI from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough # Get project, data store, and model type from env variables project_id = os.environ.get("GOOGLE_CLOUD_PROJECT_ID") data_store_id = os.environ.get("DATA_STORE_ID") model_type = os.environ.get("MODEL_TYPE") if not data_store_id: raise ValueError( "No value provided in env variable 'DATA_STORE_ID'. " "A data store is required to run this application." ) # Set LLM and embeddings model = ChatVertexAI(model_name=model_type, temperature=0.0) # Create Vertex AI retriever retriever = GoogleVertexAISearchRetriever( project_id=project_id, search_engine_id=data_store_id ) # RAG prompt template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) # RAG chain = ( RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser() ) # Add typing for input class Question(BaseModel): __root__: str chain = chain.with_types(input_type=Question)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-sensitive-data-protection/README.md
# rag-google-cloud-sensitive-data-protection

This template is an application that utilizes Google Sensitive Data Protection, a service for detecting and redacting sensitive data in text, and PaLM 2 for Chat (chat-bison), although you can use any model.

For more context on using Sensitive Data Protection, check [here](https://cloud.google.com/dlp/docs/sensitive-data-protection-overview).

## Environment Setup

Before using this template, please ensure that you enable the [DLP API](https://console.cloud.google.com/marketplace/product/google/dlp.googleapis.com) and [Vertex AI API](https://console.cloud.google.com/marketplace/product/google/aiplatform.googleapis.com) in your Google Cloud project.

For some common environment troubleshooting steps related to Google Cloud, see the bottom of this readme.

Set the following environment variables:

* `GOOGLE_CLOUD_PROJECT_ID` - Your Google Cloud project ID.
* `MODEL_TYPE` - The model type for Vertex AI (e.g. `chat-bison`)

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-google-cloud-sensitive-data-protection
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-google-cloud-sensitive-data-protection
```

And add the following code to your `server.py` file:

```python
from rag_google_cloud_sensitive_data_protection.chain import chain as rag_google_cloud_sensitive_data_protection_chain

add_routes(app, rag_google_cloud_sensitive_data_protection_chain, path="/rag-google-cloud-sensitive-data-protection")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/). If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-google-cloud-sensitive-data-protection/playground](http://127.0.0.1:8000/rag-google-cloud-sensitive-data-protection/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-google-cloud-sensitive-data-protection")
```

## Troubleshooting Google Cloud

You can set your `gcloud` credentials with the `gcloud` CLI using `gcloud auth application-default login`.

You can set your `gcloud` project with the following commands:

```bash
gcloud config set project <your project>
gcloud auth application-default set-quota-project <your project>
export GOOGLE_CLOUD_PROJECT_ID=<your project>
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-sensitive-data-protection/main.py
from rag_google_cloud_sensitive_data_protection.chain import chain if __name__ == "__main__": query = { "question": "Good morning. My name is Captain Blackbeard. My phone number " "is 555-555-5555. And my email is lovely.pirate@gmail.com. Have a nice day.", "chat_history": [], } print(chain.invoke(query))
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/__init__.py
from rag_google_cloud_sensitive_data_protection.chain import chain __all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py
import os from typing import List, Tuple from google.cloud import dlp_v2 from langchain_community.chat_models import ChatVertexAI from langchain_core.messages import AIMessage, HumanMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.runnables import RunnableLambda, RunnableParallel # Formatting for chat history def _format_chat_history(chat_history: List[Tuple[str, str]]): buffer = [] for human, ai in chat_history: buffer.append(HumanMessage(content=human)) buffer.append(AIMessage(content=ai)) return buffer def _deidentify_with_replace( input_str: str, info_types: List[str], project: str, ) -> str: """Uses the Data Loss Prevention API to deidentify sensitive data in a string by replacing matched input values with the info type. Args: project: The Google Cloud project id to use as a parent resource. input_str: The string to deidentify (will be treated as text). info_types: A list of strings representing info types to look for. Returns: str: The input string after it has been deidentified. """ # Instantiate a client dlp = dlp_v2.DlpServiceClient() # Convert the project id into a full resource id. parent = f"projects/{project}/locations/global" if info_types is None: info_types = ["PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD_NUMBER"] # Construct inspect configuration dictionary inspect_config = {"info_types": [{"name": info_type} for info_type in info_types]} # Construct deidentify configuration dictionary deidentify_config = { "info_type_transformations": { "transformations": [ {"primitive_transformation": {"replace_with_info_type_config": {}}} ] } } # Construct item item = {"value": input_str} # Call the API response = dlp.deidentify_content( request={ "parent": parent, "deidentify_config": deidentify_config, "inspect_config": inspect_config, "item": item, } ) # Print out the results. return response.item.value # Prompt we will use prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are a helpful assistant who translates to pirate", ), MessagesPlaceholder(variable_name="chat_history"), ("user", "{question}"), ] ) # Create Vertex AI retriever project_id = os.environ.get("GOOGLE_CLOUD_PROJECT_ID") model_type = os.environ.get("MODEL_TYPE") # Set LLM and embeddings model = ChatVertexAI(model_name=model_type, temperature=0.0) class ChatHistory(BaseModel): question: str chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}}) _inputs = RunnableParallel( { "question": RunnableLambda( lambda x: _deidentify_with_replace( input_str=x["question"], info_types=["PERSON_NAME", "PHONE_NUMBER", "EMAIL_ADDRESS"], project=project_id, ) ).with_config(run_name="<lambda> _deidentify_with_replace"), "chat_history": RunnableLambda( lambda x: _format_chat_history(x["chat_history"]) ).with_config(run_name="<lambda> _format_chat_history"), } ) # RAG chain = _inputs | prompt | model | StrOutputParser() chain = chain.with_types(input_type=ChatHistory).with_config(run_name="Inputs")
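To see the redaction step in isolation (before the prompt and model run), the helper above can be called directly. A rough sketch, assuming `GOOGLE_CLOUD_PROJECT_ID` is set and the DLP API is enabled; the sample text mirrors the query used in `main.py`:

```python
# Rough standalone check of the DLP redaction helper defined above.
import os

from rag_google_cloud_sensitive_data_protection.chain import _deidentify_with_replace

redacted = _deidentify_with_replace(
    input_str="Call me at 555-555-5555 or email lovely.pirate@gmail.com.",
    info_types=["PHONE_NUMBER", "EMAIL_ADDRESS"],
    project=os.environ["GOOGLE_CLOUD_PROJECT_ID"],
)

# Matched values come back replaced with their info-type labels rather than the raw data.
print(redacted)
```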
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gemini-multi-modal/README.md
# rag-gemini-multi-modal

Multi-modal LLMs enable visual assistants that can perform question-answering about images.

This template creates a visual assistant for slide decks, which often contain visuals such as graphs or figures.

It uses OpenCLIP embeddings to embed all of the slide images and stores them in Chroma.

Given a question, relevant slides are retrieved and passed to [Google Gemini](https://deepmind.google/technologies/gemini/#introduction) for answer synthesis.

![Diagram illustrating the process of a visual assistant using multi-modal LLM, from slide deck images to OpenCLIP embedding, retrieval, and synthesis with Google Gemini, resulting in an answer.](https://github.com/langchain-ai/langchain/assets/122662504/b9e69bef-d687-4ecf-a599-937e559d5184 "Workflow Diagram for Visual Assistant Using Multi-modal LLM")

## Input

Supply a slide deck as a PDF in the `/docs` directory.

By default, this template has a slide deck about Q3 earnings from Datadog, a public technology company.

Example questions to ask can be:

```
How many customers does Datadog have?
What is Datadog platform % Y/Y growth in FY20, FY21, and FY22?
```

To create an index of the slide deck, run:

```
poetry install
python ingest.py
```

## Storage

This template will use [OpenCLIP](https://github.com/mlfoundations/open_clip) multi-modal embeddings to embed the images.

You can select different embedding model options (see results [here](https://github.com/mlfoundations/open_clip/blob/main/docs/openclip_results.csv)).

The first time you run the app, it will automatically download the multimodal embedding model.

By default, LangChain will use an embedding model with moderate performance but lower memory requirements, `ViT-H-14`.

You can choose alternative `OpenCLIPEmbeddings` models in `ingest.py`:

```
vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory=str(re_vectorstore_path),
    embedding_function=OpenCLIPEmbeddings(
        model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k"
    ),
)
```

## LLM

The app will retrieve images using multi-modal embeddings, and pass them to Google Gemini.

## Environment Setup

Set your `GOOGLE_API_KEY` environment variable in order to access Gemini.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-gemini-multi-modal
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-gemini-multi-modal
```

And add the following code to your `server.py` file:

```python
from rag_gemini_multi_modal import chain as rag_gemini_multi_modal_chain

add_routes(app, rag_gemini_multi_modal_chain, path="/rag-gemini-multi-modal")
```

(Optional) Let's now configure LangSmith. LangSmith will help us trace, monitor and debug LangChain applications. You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at [http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-gemini-multi-modal/playground](http://127.0.0.1:8000/rag-gemini-multi-modal/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-gemini-multi-modal")
```
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gemini-multi-modal/ingest.py
import os
from pathlib import Path

import pypdfium2 as pdfium
from langchain_community.vectorstores import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings


def get_images_from_pdf(pdf_path, img_dump_path):
    """
    Extract images from each page of a PDF document and save as JPEG files.

    :param pdf_path: A string representing the path to the PDF file.
    :param img_dump_path: A string representing the path to dump images.
    """
    pdf = pdfium.PdfDocument(pdf_path)
    n_pages = len(pdf)
    for page_number in range(n_pages):
        page = pdf.get_page(page_number)
        bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0))
        pil_image = bitmap.to_pil()
        pil_image.save(f"{img_dump_path}/img_{page_number + 1}.jpg", format="JPEG")


# Load PDF
doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf"
img_dump_path = Path(__file__).parent / "docs/"
rel_doc_path = doc_path.relative_to(Path.cwd())
rel_img_dump_path = img_dump_path.relative_to(Path.cwd())
print("pdf index")
pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path)
print("done")
vectorstore = Path(__file__).parent / "chroma_db_multi_modal"
re_vectorstore_path = vectorstore.relative_to(Path.cwd())

# Load embedding function
print("Loading embedding function")
embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k")

# Create chroma
vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"),
    embedding_function=embedding,
)

# Get image URIs
image_uris = sorted(
    [
        os.path.join(rel_img_dump_path, image_name)
        for image_name in os.listdir(rel_img_dump_path)
        if image_name.endswith(".jpg")
    ]
)

# Add images
print("Embedding images")
vectorstore_mmembd.add_images(uris=image_uris)
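Once the images are embedded, retrieval can be sanity-checked without calling the LLM. A minimal sketch (the query string is just an example):

```python
# Quick retrieval check: OpenCLIP embeds the text query into the same space
# as the stored slide images, so similarity_search returns image documents.
results = vectorstore_mmembd.similarity_search("ARR growth", k=2)
print(f"retrieved {len(results)} slide image(s)")
```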
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gemini-multi-modal/rag_gemini_multi_modal.ipynb
{ "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "681a5d1e", "metadata": {}, "source": [ "## Run Template\n", "\n", "In `server.py`, set -\n", "```\n", "add_routes(app, chain_rag_conv, path=\"/rag-gemini-multi-modal\")\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "d774be2a", "metadata": {}, "outputs": [], "source": [ "from langserve.client import RemoteRunnable\n", "\n", "rag_app = RemoteRunnable(\"http://localhost:8001/rag-gemini-multi-modal\")\n", "rag_app.invoke(\"What is the projected TAM for observability expected for each year through 2026?\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/__init__.py
from rag_gemini_multi_modal.chain import chain

__all__ = ["chain"]
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py
import base64
import io
from pathlib import Path

from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from langchain_google_genai import ChatGoogleGenerativeAI
from PIL import Image


def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string.

    :param base64_string: A Base64 encoded string of the image to be resized.
    :param size: A tuple representing the new size (width, height) for the image.
    :return: A Base64 encoded string of the resized image.
    """
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))
    resized_img = img.resize(size, Image.LANCZOS)
    buffered = io.BytesIO()
    resized_img.save(buffered, format=img.format)
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def get_resized_images(docs):
    """
    Resize images from base64-encoded strings.

    :param docs: A list of base64-encoded images to be resized.
    :return: Dict containing a list of resized base64-encoded strings.
    """
    b64_images = []
    for doc in docs:
        if isinstance(doc, Document):
            doc = doc.page_content
        resized_image = resize_base64_image(doc, size=(1280, 720))
        b64_images.append(resized_image)
    return {"images": b64_images}


def img_prompt_func(data_dict, num_images=2):
    """
    Gemini prompt for image analysis.

    :param data_dict: A dict with images and a user-provided question.
    :param num_images: Number of images to include in the prompt.
    :return: A list containing message objects for each image and the text prompt.
    """
    messages = []
    if data_dict["context"]["images"]:
        for image in data_dict["context"]["images"][:num_images]:
            image_message = {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{image}"},
            }
            messages.append(image_message)
    text_message = {
        "type": "text",
        "text": (
            "You are an analyst tasked with answering questions about visual content.\n"
            "You will be given a set of image(s) from a slide deck / presentation.\n"
            "Use this information to answer the user question.\n"
            f"User-provided question: {data_dict['question']}\n\n"
        ),
    }
    messages.append(text_message)
    return [HumanMessage(content=messages)]


def multi_modal_rag_chain(retriever):
    """
    Multi-modal RAG chain.

    :param retriever: A function that retrieves the necessary context for the model.
    :return: A chain of functions representing the multi-modal RAG process.
    """
    # Initialize the multi-modal Large Language Model with specific parameters
    model = ChatGoogleGenerativeAI(model="gemini-pro-vision")

    # Define the RAG pipeline
    chain = (
        {
            "context": retriever | RunnableLambda(get_resized_images),
            "question": RunnablePassthrough(),
        }
        | RunnableLambda(img_prompt_func)
        | model
        | StrOutputParser()
    )

    return chain


# Load chroma
vectorstore_mmembd = Chroma(
    collection_name="multi-modal-rag",
    persist_directory=str(Path(__file__).parent.parent / "chroma_db_multi_modal"),
    embedding_function=OpenCLIPEmbeddings(
        model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k"
    ),
)

# Make retriever
retriever_mmembd = vectorstore_mmembd.as_retriever()

# Create RAG chain
chain = multi_modal_rag_chain(retriever_mmembd)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
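For local experimentation outside LangServe, a minimal sketch of invoking this chain directly might look like the following. It assumes `GOOGLE_API_KEY` is exported and that the `chroma_db_multi_modal` persist directory has already been populated by `ingest.py`:

```python
# Minimal sketch, not part of the template: call the chain directly.
# Assumes GOOGLE_API_KEY is set and `python ingest.py` has already built
# the Chroma index in `chroma_db_multi_modal`.
from rag_gemini_multi_modal import chain

answer = chain.invoke(
    "What is Datadog platform % Y/Y growth in FY20, FY21, and FY22?"
)
print(answer)
```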
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-fusion/README.md
# rag-fusion

This template enables RAG fusion using a re-implementation of the project found [here](https://github.com/Raudaschl/rag-fusion).

It performs multiple query generation and Reciprocal Rank Fusion to re-rank search results; a toy sketch of the fusion step is shown after this README.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-fusion
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-fusion
```

And add the following code to your `server.py` file:

```python
from rag_fusion.chain import chain as rag_fusion_chain

add_routes(app, rag_fusion_chain, path="/rag-fusion")
```

(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor and debug LangChain applications.
You can sign up for LangSmith [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-fusion/playground](http://127.0.0.1:8000/rag-fusion/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-fusion")
```
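To make the fusion step concrete, here is a small, self-contained sketch of Reciprocal Rank Fusion on toy ranked lists. It illustrates the idea only and is not the template's code; the template's implementation lives in `rag_fusion/chain.py` and operates on LangChain `Document`s:

```python
# Toy illustration of Reciprocal Rank Fusion (RRF), independent of the template.
# Each inner list is one query variant's results, ordered most to least relevant.
def reciprocal_rank_fusion(result_lists, k=60):
    scores = {}
    for results in result_lists:
        for rank, doc_id in enumerate(results):
            # Each appearance contributes 1 / (rank + k); documents ranked highly
            # across many query variants accumulate the largest fused scores.
            scores[doc_id] = scores.get(doc_id, 0.0) + 1.0 / (rank + k)
    return sorted(scores.items(), key=lambda item: item[1], reverse=True)


results_per_query = [
    ["doc2", "doc1", "doc7"],  # results for query variant 1
    ["doc1", "doc2", "doc9"],  # results for query variant 2
    ["doc1", "doc7", "doc3"],  # results for query variant 3
]

# "doc1" appears at ranks 1, 0, 0 -> 1/61 + 1/60 + 1/60, the highest fused score.
print(reciprocal_rank_fusion(results_per_query))
```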
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-fusion/ingest.py
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

all_documents = {
    "doc1": "Climate change and economic impact.",
    "doc2": "Public health concerns due to climate change.",
    "doc3": "Climate change: A social perspective.",
    "doc4": "Technological solutions to climate change.",
    "doc5": "Policy changes needed to combat climate change.",
    "doc6": "Climate change and its impact on biodiversity.",
    "doc7": "Climate change: The science and models.",
    "doc8": "Global warming: A subset of climate change.",
    "doc9": "How climate change affects daily weather.",
    "doc10": "The history of climate change activism.",
}

PineconeVectorStore.from_texts(
    list(all_documents.values()), OpenAIEmbeddings(), index_name="rag-fusion"
)
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-fusion/main.py
from rag_fusion.chain import chain

if __name__ == "__main__":
    original_query = "impact of climate change"
    print(chain.invoke(original_query))
Wed, 26 Jun 2024 13:15:51 GMT
https://github.com/langchain-ai/langchain/blob/master/templates/rag-fusion/rag_fusion/chain.py
from langchain import hub
from langchain.load import dumps, loads
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_pinecone import PineconeVectorStore


def reciprocal_rank_fusion(results: list[list], k=60):
    fused_scores = {}
    for docs in results:
        # Assumes the docs are returned in sorted order of relevance
        for rank, doc in enumerate(docs):
            doc_str = dumps(doc)
            if doc_str not in fused_scores:
                fused_scores[doc_str] = 0
            fused_scores[doc_str] += 1 / (rank + k)

    reranked_results = [
        (loads(doc), score)
        for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    ]
    return reranked_results


prompt = hub.pull("langchain-ai/rag-fusion-query-generation")

generate_queries = (
    prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n"))
)

vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()

chain = (
    {"original_query": lambda x: x}
    | generate_queries
    | retriever.map()
    | reciprocal_rank_fusion
)


# Add typed inputs to chain for playground
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
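As a usage sketch: because `reciprocal_rank_fusion` is the last step, the chain's output is a list of `(Document, score)` pairs sorted by fused score, so a caller might keep only the top few. This assumes `OPENAI_API_KEY` and Pinecone credentials are set and the `rag-fusion` index has been populated via `ingest.py`:

```python
# Hypothetical usage, not part of the template files.
# Assumes OPENAI_API_KEY / Pinecone credentials are set and the "rag-fusion"
# index was populated with ingest.py.
from rag_fusion.chain import chain

results = chain.invoke("impact of climate change")
for doc, score in results[:3]:
    print(round(score, 4), doc.page_content)
```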
Wed, 26 Jun 2024 13:15:51 GMT