Dataset columns: prompt (string, 51 to 10k characters), completion (string, 8 to 362 characters), api (string, 18 to 90 characters). Each row pairs a code prompt that ends mid-expression with the completion that finishes it, followed by the fully qualified name of the API being completed.
import os os.environ["LANGCHAIN_WANDB_TRACING"] = "true" os.environ["WANDB_PROJECT"] = "langchain-tracing" from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import wandb_tracing_enabled from langchain_openai import OpenAI llm = OpenAI(temperature=0) tools =
load_tools(["llm-math"], llm=llm)
langchain.agents.load_tools
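A minimal continuation sketch for the row above: the loaded math tool drives an agent whose runs are traced to W&B because LANGCHAIN_WANDB_TRACING is set (the questions are illustrative).

agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
# Runs are logged to the "langchain-tracing" W&B project configured above.
agent.run("What is 2 raised to the 0.123243 power?")
# To trace only a specific block (after unsetting LANGCHAIN_WANDB_TRACING),
# wrap it in the imported context manager:
with wandb_tracing_enabled():
    agent.run("What is 5 raised to the 0.123243 power?")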
from langchain_community.llms import Baseten mistral =
Baseten(model="MODEL_ID", deployment="production")
langchain_community.llms.Baseten
from langchain_community.document_loaders import ArcGISLoader URL = "https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7" loader =
ArcGISLoader(URL)
langchain_community.document_loaders.ArcGISLoader
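Loading returns one Document per feature in the layer; a quick sketch of inspecting the output:

docs = loader.load()
# Each Document carries the feature attributes plus layer metadata.
print(docs[0].metadata)
print(docs[0].page_content[:200])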
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pinecone-client pinecone-text') import getpass import os os.environ["PINECONE_API_KEY"] = getpass.getpass("Pinecone API Key:") from langchain.retrievers import PineconeHybridSearchRetriever os.environ["PINECONE_ENVIRONMENT"] = getpass.getpass("Pinecone Environment:") os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import os import pinecone api_key = os.getenv("PINECONE_API_KEY") or "PINECONE_API_KEY" index_name = "langchain-pinecone-hybrid-search" pinecone.create_index( name=index_name, dimension=1536, # dimensionality of dense model metric="dotproduct", # sparse values supported only for dotproduct pod_type="s1", metadata_config={"indexed": []}, # see explanation above ) index = pinecone.Index(index_name) from langchain_openai import OpenAIEmbeddings embeddings =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
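A sketch of wiring the dense embeddings together with a sparse BM25 encoder into the hybrid retriever, following the same guide (the toy corpus is illustrative):

from pinecone_text.sparse import BM25Encoder

bm25_encoder = BM25Encoder().default()  # pretrained MS MARCO parameters
corpus = ["foo", "bar", "world", "hello"]
bm25_encoder.fit(corpus)  # fit tf-idf values to your own corpus
retriever = PineconeHybridSearchRetriever(
    embeddings=embeddings, sparse_encoder=bm25_encoder, index=index
)
retriever.add_texts(corpus)
retriever.get_relevant_documents("foo")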
import os from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import SpreedlyLoader spreedly_loader = SpreedlyLoader( os.environ["SPREEDLY_ACCESS_TOKEN"], "gateways_options" ) index =
VectorstoreIndexCreator()
langchain.indexes.VectorstoreIndexCreator
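Completing the row, the index is built from the loader and then queried; the question is illustrative, and depending on your LangChain version query() may also require an explicit llm argument:

index = VectorstoreIndexCreator().from_loaders([spreedly_loader])
# Newer releases: index.query(question, llm=...)
index.query("What payment gateways does Spreedly support?")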
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb') from langchain_community.document_loaders import WebBaseLoader from langchain_text_splitters import RecursiveCharacterTextSplitter loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.vectorstores import Chroma vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings()) question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) len(docs) docs[0] get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python') get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir') from langchain_community.llms import LlamaCpp n_gpu_layers = 1 # Metal set to 1 is enough. n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip. llm = LlamaCpp( model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin", n_gpu_layers=n_gpu_layers, n_batch=n_batch, n_ctx=2048, f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls verbose=True, ) llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver") from langchain_community.llms import GPT4All gpt4all = GPT4All( model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin", max_tokens=2048, ) from langchain_community.llms.llamafile import Llamafile llamafile = Llamafile() llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:") from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate prompt = PromptTemplate.from_template( "Summarize the main themes in these retrieved docs: {docs}" ) def format_docs(docs): return "\n\n".join(doc.page_content for doc in docs) chain = {"docs": format_docs} | prompt | llm | StrOutputParser() question = "What are the approaches to Task Decomposition?" docs = vectorstore.similarity_search(question) chain.invoke(docs) from langchain import hub rag_prompt = hub.pull("rlm/rag-prompt") rag_prompt.messages from langchain_core.runnables import RunnablePassthrough, RunnablePick chain = ( RunnablePassthrough.assign(context=RunnablePick("context") | format_docs) | rag_prompt | llm | StrOutputParser() ) chain.invoke({"context": docs, "question": question}) rag_prompt_llama =
hub.pull("rlm/rag-prompt-llama")
langchain.hub.pull
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai') from langchain.utils.math import cosine_similarity from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_openai import ChatOpenAI, OpenAIEmbeddings physics_template = """You are a very smart physics professor. \ You are great at answering questions about physics in a concise and easy to understand manner. \ When you don't know the answer to a question you admit that you don't know. Here is a question: {query}""" math_template = """You are a very good mathematician. You are great at answering math questions. \ You are so good because you are able to break down hard problems into their component parts, \ answer the component parts, and then put them together to answer the broader question. Here is a question: {query}""" embeddings = OpenAIEmbeddings() prompt_templates = [physics_template, math_template] prompt_embeddings = embeddings.embed_documents(prompt_templates) def prompt_router(input): query_embedding = embeddings.embed_query(input["query"]) similarity = cosine_similarity([query_embedding], prompt_embeddings)[0] most_similar = prompt_templates[similarity.argmax()] print("Using MATH" if most_similar == math_template else "Using PHYSICS") return PromptTemplate.from_template(most_similar) chain = ( {"query": RunnablePassthrough()} | RunnableLambda(prompt_router) |
ChatOpenAI()
langchain_openai.ChatOpenAI
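Completing the semantic-router chain and exercising both branches (the questions are illustrative):

chain = (
    {"query": RunnablePassthrough()}
    | RunnableLambda(prompt_router)
    | ChatOpenAI()
    | StrOutputParser()
)
print(chain.invoke("What's a black hole?"))    # routes to the physics prompt
print(chain.invoke("What's a path integral?"))  # routes to the math prompt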
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-api-python-client > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-oauthlib > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-httplib2 > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages') from langchain_community.agent_toolkits import GmailToolkit toolkit =
GmailToolkit()
langchain_community.agent_toolkits.GmailToolkit
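The toolkit exposes its Gmail tools via get_tools(); a quick sketch:

tools = toolkit.get_tools()
for t in tools:
    print(t.name)  # draft, send, search, and read tools for Gmail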
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-storage') from langchain_community.document_loaders import GCSFileLoader loader =
GCSFileLoader(project_name="aist", bucket="testing-hwc", blob="fake.docx")
langchain_community.document_loaders.GCSFileLoader
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch') path = "/Users/rlm/Desktop/cpi/" from langchain_community.document_loaders import PyPDFLoader loader =
PyPDFLoader(path + "cpi.pdf")
langchain_community.document_loaders.PyPDFLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai wikipedia') from operator import itemgetter from langchain.agents import AgentExecutor, load_tools from langchain.agents.format_scratchpad import format_to_openai_function_messages from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser from langchain.tools import WikipediaQueryRun from langchain_community.utilities import WikipediaAPIWrapper from langchain_core.prompt_values import ChatPromptValue from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai import ChatOpenAI wiki = WikipediaQueryRun( api_wrapper=WikipediaAPIWrapper(top_k_results=5, doc_content_chars_max=10_000) ) tools = [wiki] prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a helpful assistant"), ("user", "{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ] ) llm = ChatOpenAI(model="gpt-3.5-turbo") agent = ( { "input": itemgetter("input"), "agent_scratchpad": lambda x: format_to_openai_function_messages( x["intermediate_steps"] ), } | prompt | llm.bind_functions(tools) |
OpenAIFunctionsAgentOutputParser()
langchain.agents.output_parsers.OpenAIFunctionsAgentOutputParser
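Wrapping the assembled agent in an executor makes it runnable (the input is illustrative):

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "How many people live in Canada?"})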
from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import ModernTreasuryLoader modern_treasury_loader = ModernTreasuryLoader("payment_orders") index =
VectorstoreIndexCreator()
langchain.indexes.VectorstoreIndexCreator
from langchain_experimental.llm_bash.base import LLMBashChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) text = "Please write a bash script that prints 'Hello World' to the console." bash_chain = LLMBashChain.from_llm(llm, verbose=True) bash_chain.run(text) from langchain.prompts.prompt import PromptTemplate from langchain_experimental.llm_bash.prompt import BashOutputParser _PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format: Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'" I need to take the following actions: - List all files in the directory - Create a new directory - Copy the files from the first directory into the second directory ```bash ls mkdir myNewDirectory cp -r target/* myNewDirectory ``` Do not use 'echo' when writing the script. That is the format. Begin! Question: {question}""" PROMPT = PromptTemplate( input_variables=["question"], template=_PROMPT_TEMPLATE, output_parser=BashOutputParser(), ) bash_chain =
LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)
langchain_experimental.llm_bash.base.LLMBashChain.from_llm
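The customized chain is then used exactly like the default one earlier in this row:

text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain.run(text)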
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_core.tools import tool @tool def multiply(first_int: int, second_int: int) -> int: """Multiply two integers together.""" return first_int * second_int @tool def add(first_int: int, second_int: int) -> int: "Add two integers." return first_int + second_int @tool def exponentiate(base: int, exponent: int) -> int: "Exponentiate the base to the exponent power." return base**exponent from operator import itemgetter from typing import Union from langchain.output_parsers import JsonOutputToolsParser from langchain_core.runnables import ( Runnable, RunnableLambda, RunnableMap, RunnablePassthrough, ) from langchain_openai import ChatOpenAI model = ChatOpenAI(model="gpt-3.5-turbo-1106") tools = [multiply, exponentiate, add] model_with_tools = model.bind_tools(tools) tool_map = {tool.name: tool for tool in tools} def call_tool(tool_invocation: dict) -> Union[str, Runnable]: """Function for dynamically constructing the end of the chain based on the model-selected tool.""" tool = tool_map[tool_invocation["type"]] return RunnablePassthrough.assign(output=itemgetter("args") | tool) call_tool_list = RunnableLambda(call_tool).map() chain = model_with_tools |
JsonOutputToolsParser()
langchain.output_parsers.JsonOutputToolsParser
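Appending the tool-dispatch map completes the chain; invoking it routes the model's tool calls to the matching Python functions (the question is illustrative):

chain = model_with_tools | JsonOutputToolsParser() | call_tool_list
chain.invoke("What's 23 times 7?")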
import getpass import os os.environ["TAVILY_API_KEY"] = getpass.getpass() from langchain_community.tools.tavily_search import TavilySearchResults tool = TavilySearchResults() tool.invoke({"query": "What happened in the latest burning man floods"}) import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain import hub from langchain.agents import AgentExecutor, create_openai_functions_agent from langchain_openai import ChatOpenAI instructions = """You are an assistant.""" base_prompt = hub.pull("langchain-ai/openai-functions-template") prompt = base_prompt.partial(instructions=instructions) llm = ChatOpenAI(temperature=0) tavily_tool = TavilySearchResults() tools = [tavily_tool] agent =
create_openai_functions_agent(llm, tools, prompt)
langchain.agents.create_openai_functions_agent
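As with other agents, an AgentExecutor turns this into something invokable (the input is illustrative):

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What happened in the latest burning man floods?"})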
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv') from langchain import hub from langchain.agents import AgentExecutor, create_react_agent, load_tools from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0.0) tools = load_tools( ["arxiv"], ) prompt = hub.pull("hwchase17/react") agent = create_react_agent(llm, tools, prompt) agent_executor =
AgentExecutor(agent=agent, tools=tools, verbose=True)
langchain.agents.AgentExecutor
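Invoking the executor lets the ReAct agent call out to arxiv (the input is illustrative):

agent_executor.invoke({"input": "What's the paper 1605.08386 about?"})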
from langchain_community.document_loaders import IFixitLoader loader =
IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811")
langchain_community.document_loaders.IFixitLoader
from langchain.prompts.pipeline import PipelinePromptTemplate from langchain.prompts.prompt import PromptTemplate full_template = """{introduction} {example} {start}""" full_prompt =
PromptTemplate.from_template(full_template)
langchain.prompts.prompt.PromptTemplate.from_template
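A sketch of finishing the pipeline: each named sub-prompt fills one slot of full_prompt (the templates and variable values are illustrative):

introduction_prompt = PromptTemplate.from_template("You are impersonating {person}.")
example_prompt = PromptTemplate.from_template(
    """Here's an example of an interaction:
Q: {example_q}
A: {example_a}"""
)
start_prompt = PromptTemplate.from_template(
    """Now, do this for real!
Q: {input}
A:"""
)
pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_prompt,
    pipeline_prompts=[
        ("introduction", introduction_prompt),
        ("example", example_prompt),
        ("start", start_prompt),
    ],
)
print(
    pipeline_prompt.format(
        person="Elon Musk",
        example_q="What's your favorite car?",
        example_a="Tesla",
        input="What's your favorite social media site?",
    )
)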
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_core.tools import tool @tool def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int: """Do something complex with a complex tool.""" return int_arg * float_arg from langchain_openai import ChatOpenAI model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) model_with_tools = model.bind_tools( [complex_tool], tool_choice="complex_tool", ) from operator import itemgetter from langchain.output_parsers import JsonOutputKeyToolsParser from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough chain = ( model_with_tools | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | complex_tool ) chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" ) from typing import Any from langchain_core.runnables import RunnableConfig def try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable: try: complex_tool.invoke(tool_args, config=config) except Exception as e: return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}" chain = ( model_with_tools | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | try_except_tool ) print( chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" ) ) chain = ( model_with_tools | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | complex_tool ) better_model = ChatOpenAI(model="gpt-4-1106-preview", temperature=0).bind_tools( [complex_tool], tool_choice="complex_tool" ) better_chain = ( better_model |
JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
langchain.output_parsers.JsonOutputKeyToolsParser
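Completing the stronger-model chain and registering it as a fallback for the original one, as the full example does:

better_chain = (
    better_model
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
    | complex_tool
)
chain_with_fallback = chain.with_fallbacks([better_chain])
chain_with_fallback.invoke(
    "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)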
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml') from typing import Any from pydantic import BaseModel from unstructured.partition.pdf import partition_pdf path = "/Users/rlm/Desktop/Papers/LLaVA/" raw_pdf_elements = partition_pdf( filename=path + "LLaVA.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) category_counts = {} for element in raw_pdf_elements: category = str(type(element)) if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 unique_categories = set(category_counts.keys()) category_counts class Element(BaseModel): type: str text: Any categorized_elements = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): categorized_elements.append(Element(type="table", text=str(element))) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): categorized_elements.append(Element(type="text", text=str(element))) table_elements = [e for e in categorized_elements if e.type == "table"] print(len(table_elements)) text_elements = [e for e in categorized_elements if e.type == "text"] print(len(text_elements)) from langchain_community.chat_models import ChatOllama from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOllama(model="llama2:13b-chat") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() texts = [i.text for i in text_elements if i.text != ""] text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) tables = [i.text for i in table_elements] table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n') import glob import os file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt"))) img_summaries = [] for file_path in file_paths: with open(file_path, "r") as file: img_summaries.append(file.read()) cleaned_img_summary = [ s.split("clip_model_load: total allocated memory: 201.27 MB\n\n", 1)[1].strip() for s in img_summaries ] import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document vectorstore = Chroma( collection_name="summaries", embedding_function=GPT4AllEmbeddings() ) store =
InMemoryStore()
langchain.storage.InMemoryStore
from langchain_community.document_loaders import GitbookLoader loader =
GitbookLoader("https://docs.gitbook.com")
langchain_community.document_loaders.GitbookLoader
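By default only the given page is fetched; load_all_paths=True crawls every path listed in the site's sitemap instead:

page_data = loader.load()
loader = GitbookLoader("https://docs.gitbook.com", load_all_paths=True)
all_pages_data = loader.load()
print(f"fetched {len(all_pages_data)} documents.")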
get_ipython().system(' pip install --quiet pypdf chromadb tiktoken openai langchain-together') from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("~/Desktop/mixtral.pdf") data = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma """ from langchain_together.embeddings import TogetherEmbeddings embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval") """ vectorstore = Chroma.from_documents( documents=all_splits, collection_name="rag-chroma", embedding=OpenAIEmbeddings(), ) retriever = vectorstore.as_retriever() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) from langchain_together import Together llm = Together( model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.0, max_tokens=2000, top_k=1, ) chain = ( RunnableParallel({"context": retriever, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain_community.chat_message_histories import RedisChatMessageHistory from langchain_community.utilities import GoogleSearchAPIWrapper from langchain_openai import OpenAI search =
GoogleSearchAPIWrapper()
langchain_community.utilities.GoogleSearchAPIWrapper
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai.chat_models import ChatOpenAI model = ChatOpenAI() prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at {ability}. Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ] ) runnable = prompt | model from langchain_community.chat_message_histories import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory store = {} def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] =
ChatMessageHistory()
langchain_community.chat_message_histories.ChatMessageHistory
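A sketch of the rest of this example: the session store backs a RunnableWithMessageHistory, and the session id travels in the invocation config (assuming get_session_history also returns store[session_id], as in the full guide):

with_message_history = RunnableWithMessageHistory(
    runnable,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)
with_message_history.invoke(
    {"ability": "math", "input": "What does cosine mean?"},
    config={"configurable": {"session_id": "abc123"}},
)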
get_ipython().run_line_magic('pip', 'install --upgrade --quiet feedparser newspaper3k listparser') from langchain_community.document_loaders import RSSFeedLoader urls = ["https://news.ycombinator.com/rss"] loader = RSSFeedLoader(urls=urls) data = loader.load() print(len(data)) print(data[0].page_content) loader =
RSSFeedLoader(urls=urls, nlp=True)
langchain_community.document_loaders.RSSFeedLoader
get_ipython().system(' pip install --quiet pypdf chromadb tiktoken openai langchain-together') from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("~/Desktop/mixtral.pdf") data = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0) all_splits = text_splitter.split_documents(data) from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma """ from langchain_together.embeddings import TogetherEmbeddings embeddings = TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval") """ vectorstore = Chroma.from_documents( documents=all_splits, collection_name="rag-chroma", embedding=OpenAIEmbeddings(), ) retriever = vectorstore.as_retriever() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough template = """Answer the question based only on the following context: {context} Question: {question} """ prompt =
ChatPromptTemplate.from_template(template)
langchain_core.prompts.ChatPromptTemplate.from_template
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, Make up your best guess.""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] prompt =
ChatPromptTemplate.from_messages(messages)
langchain.prompts.chat.ChatPromptTemplate.from_messages
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "unstructured[all-docs]"') from langchain_community.document_loaders import UnstructuredFileLoader loader = UnstructuredFileLoader("./example_data/state_of_the_union.txt") docs = loader.load() docs[0].page_content[:400] files = ["./example_data/whatsapp_chat.txt", "./example_data/layout-parser-paper.pdf"] loader =
UnstructuredFileLoader(files)
langchain_community.document_loaders.UnstructuredFileLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3') from langchain_community.document_loaders import S3DirectoryLoader loader = S3DirectoryLoader("testing-hwc") loader.load() loader =
S3DirectoryLoader("testing-hwc", prefix="fake")
langchain_community.document_loaders.S3DirectoryLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser() for txt in chain.stream({"input": "What's your name?"}): print(txt, end="") prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert coding AI. Respond only in valid python; no narration whatsoever.", ), ("user", "{input}"), ] ) chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser() for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}): print(txt, end="") from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="nemotron_steerlm_8b") complex_result = llm.invoke( "What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0} ) print("Un-creative\n") print(complex_result.content) print("\n\nCreative\n") creative_result = llm.invoke( "What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9} ) print(creative_result.content) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = ( prompt | ChatNVIDIA(model="nemotron_steerlm_8b").bind( labels={"creativity": 9, "complexity": 0, "verbosity": 9} ) | StrOutputParser() ) for txt in chain.stream({"input": "Why is a PB&J?"}): print(txt, end="") import IPython import requests image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image image_content = requests.get(image_url).content IPython.display.Image(image_content) from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="playground_neva_22b") from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ], labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0}, ) import IPython import requests image_url = "https://picsum.photos/seed/kitten/300/200" image_content = requests.get(image_url).content IPython.display.Image(image_content) import base64 from langchain_core.messages import HumanMessage b64_string = base64.b64encode(image_content).decode("utf-8") llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, { "type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}, }, ] ) ] ) base64_with_mime_type = f"data:image/png;base64,{b64_string}" llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />') from langchain_nvidia_ai_endpoints import ChatNVIDIA kosmos =
ChatNVIDIA(model="kosmos_2")
langchain_nvidia_ai_endpoints.ChatNVIDIA
get_ipython().system('pip install -qU langchain-ibm') import os from getpass import getpass watsonx_api_key = getpass() os.environ["WATSONX_APIKEY"] = watsonx_api_key import os os.environ["WATSONX_URL"] = "your service instance url" os.environ["WATSONX_TOKEN"] = "your token for accessing the CPD cluster" os.environ["WATSONX_PASSWORD"] = "your password for accessing the CPD cluster" os.environ["WATSONX_USERNAME"] = "your username for accessing the CPD cluster" os.environ["WATSONX_INSTANCE_ID"] = "your instance_id for accessing the CPD cluster" parameters = { "decoding_method": "sample", "max_new_tokens": 100, "min_new_tokens": 1, "temperature": 0.5, "top_k": 50, "top_p": 1, } from langchain_ibm import WatsonxLLM watsonx_llm = WatsonxLLM( model_id="ibm/granite-13b-instruct-v2", url="https://us-south.ml.cloud.ibm.com", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) watsonx_llm = WatsonxLLM( model_id="ibm/granite-13b-instruct-v2", url="PASTE YOUR URL HERE", username="PASTE YOUR USERNAME HERE", password="PASTE YOUR PASSWORD HERE", instance_id="openshift", version="4.8", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) watsonx_llm = WatsonxLLM( deployment_id="PASTE YOUR DEPLOYMENT_ID HERE", url="https://us-south.ml.cloud.ibm.com", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) from langchain.prompts import PromptTemplate template = "Generate a random question about {topic}: Question: " prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
import zipfile import requests def download_and_unzip(url: str, output_path: str = "file.zip") -> None: file_id = url.split("/")[-2] download_url = f"https://drive.google.com/uc?export=download&id={file_id}" response = requests.get(download_url) if response.status_code != 200: print("Failed to download the file.") return with open(output_path, "wb") as file: file.write(response.content) print(f"File {output_path} downloaded.") with zipfile.ZipFile(output_path, "r") as zip_ref: zip_ref.extractall() print(f"File {output_path} has been unzipped.") url = ( "https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing" ) download_and_unzip(url) directory_path = "./hogwarts" from langchain_community.chat_loaders.facebook_messenger import ( FolderFacebookMessengerChatLoader, SingleFileFacebookMessengerChatLoader, ) loader = SingleFileFacebookMessengerChatLoader( path="./hogwarts/inbox/HermioneGranger/messages_Hermione_Granger.json", ) chat_session = loader.load()[0] chat_session["messages"][:3] loader = FolderFacebookMessengerChatLoader( path="./hogwarts", ) chat_sessions = loader.load() len(chat_sessions) from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) merged_sessions = merge_chat_runs(chat_sessions) alternating_sessions = list(
map_ai_messages(merged_sessions, "Harry Potter")
langchain_community.chat_loaders.utils.map_ai_messages
from getpass import getpass KAY_API_KEY = getpass() OPENAI_API_KEY = getpass() import os os.environ["KAY_API_KEY"] = KAY_API_KEY os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.chains import ConversationalRetrievalChain from langchain.retrievers import KayAiRetriever from langchain_openai import ChatOpenAI model = ChatOpenAI(model_name="gpt-3.5-turbo") retriever = KayAiRetriever.create( dataset_id="company", data_types=["PressRelease"], num_contexts=6 ) qa =
ConversationalRetrievalChain.from_llm(model, retriever=retriever)
langchain.chains.ConversationalRetrievalChain.from_llm
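A usage sketch: the chain threads chat history through successive questions (the question is illustrative):

chat_history = []
question = "How is the healthcare industry adopting generative AI?"
result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print(result["answer"])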
get_ipython().run_line_magic('pip', 'install --upgrade --quiet annoy') from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Annoy embeddings_func = HuggingFaceEmbeddings() texts = ["pizza is great", "I love salad", "my car", "a dog"] vector_store = Annoy.from_texts(texts, embeddings_func) vector_store_v2 = Annoy.from_texts( texts, embeddings_func, metric="dot", n_trees=100, n_jobs=1 ) vector_store.similarity_search("food", k=3) vector_store.similarity_search_with_score("food", k=3) from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter =
CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
langchain_text_splitters.CharacterTextSplitter
import re from IPython.display import Image, display from steamship import Block, Steamship from langchain.agents import AgentType, initialize_agent from langchain.tools import SteamshipImageGenerationTool from langchain_openai import OpenAI llm = OpenAI(temperature=0) tools = [
SteamshipImageGenerationTool(model_name="dall-e")
langchain.tools.SteamshipImageGenerationTool
get_ipython().run_line_magic('pip', 'install --upgrade --quiet gigachat') import os from getpass import getpass os.environ["GIGACHAT_CREDENTIALS"] = getpass() from langchain_community.chat_models import GigaChat chat =
GigaChat(verify_ssl_certs=False)
langchain_community.chat_models.GigaChat
from langchain.retrievers import ParentDocumentRetriever from langchain.storage import InMemoryStore from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loaders = [ TextLoader("../../paul_graham_essay.txt"), TextLoader("../../state_of_the_union.txt"), ] docs = [] for loader in loaders: docs.extend(loader.load()) child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) vectorstore = Chroma( collection_name="full_documents", embedding_function=OpenAIEmbeddings() ) store =
InMemoryStore()
langchain.storage.InMemoryStore
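A sketch of assembling the retriever: small child chunks are indexed for search, while whole parent documents come back from retrieval:

retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
)
retriever.add_documents(docs, ids=None)
# Similarity search hits land on small chunks...
sub_docs = vectorstore.similarity_search("justice breyer")
# ...but the retriever returns the full parent documents.
retrieved_docs = retriever.get_relevant_documents("justice breyer")
len(retrieved_docs[0].page_content)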
import pprint from langchain_community.utilities import SearxSearchWrapper search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888") search.run("What is the capital of France") search = SearxSearchWrapper( searx_host="http://127.0.0.1:8888", k=5 ) # k is for max number of items search.run("large language model ", engines=["wiki"]) search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888", k=1) search.run("deep learning", language="es", engines=["wiki"]) search =
SearxSearchWrapper(searx_host="http://127.0.0.1:8888")
langchain_community.utilities.SearxSearchWrapper
from langchain import hub from langchain.agents import AgentExecutor, create_react_agent from langchain_community.tools import WikipediaQueryRun from langchain_community.utilities import WikipediaAPIWrapper from langchain_openai import ChatOpenAI api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) tool =
WikipediaQueryRun(api_wrapper=api_wrapper)
langchain_community.tools.WikipediaQueryRun
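The tool can be run directly, or handed to the ReAct agent whose imports this row already pulls in (the inputs are illustrative):

tool.run({"query": "langchain"})
prompt = hub.pull("hwchase17/react")
llm = ChatOpenAI(temperature=0)
agent = create_react_agent(llm, [tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[tool], verbose=True)
agent_executor.invoke({"input": "What is LangChain?"})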
from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryByteStore from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loaders = [ TextLoader("../../paul_graham_essay.txt"), TextLoader("../../state_of_the_union.txt"), ] docs = [] for loader in loaders: docs.extend(loader.load()) text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000) docs = text_splitter.split_documents(docs) vectorstore = Chroma( collection_name="full_documents", embedding_function=OpenAIEmbeddings() ) store =
InMemoryByteStore()
langchain.storage.InMemoryByteStore
from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI template = """Human: {question} AI Assistant: """ prompt = PromptTemplate.from_template(template) import getpass my_account_id = getpass.getpass("Enter your Cloudflare account ID:\n\n") my_api_token = getpass.getpass("Enter your Cloudflare API token:\n\n") llm =
CloudflareWorkersAI(account_id=my_account_id, api_token=my_api_token)
langchain_community.llms.cloudflare_workersai.CloudflareWorkersAI
from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryByteStore from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loaders = [ TextLoader("../../paul_graham_essay.txt"), TextLoader("../../state_of_the_union.txt"), ] docs = [] for loader in loaders: docs.extend(loader.load()) text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000) docs = text_splitter.split_documents(docs) vectorstore = Chroma( collection_name="full_documents", embedding_function=OpenAIEmbeddings() ) store = InMemoryByteStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key, ) import uuid doc_ids = [str(uuid.uuid4()) for _ in docs] child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) sub_docs = [] for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) retriever.vectorstore.add_documents(sub_docs) retriever.docstore.mset(list(zip(doc_ids, docs))) retriever.vectorstore.similarity_search("justice breyer")[0] len(retriever.get_relevant_documents("justice breyer")[0].page_content) from langchain.retrievers.multi_vector import SearchType retriever.search_type = SearchType.mmr len(retriever.get_relevant_documents("justice breyer")[0].page_content) import uuid from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI chain = ( {"doc": lambda x: x.page_content} | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}") | ChatOpenAI(max_retries=0) | StrOutputParser() ) summaries = chain.batch(docs, {"max_concurrency": 5}) vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()) store = InMemoryByteStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key, ) doc_ids = [str(uuid.uuid4()) for _ in docs] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, docs))) sub_docs = vectorstore.similarity_search("justice breyer") sub_docs[0] retrieved_docs = retriever.get_relevant_documents("justice breyer") len(retrieved_docs[0].page_content) functions = [ { "name": "hypothetical_questions", "description": "Generate hypothetical questions", "parameters": { "type": "object", "properties": { "questions": { "type": "array", "items": {"type": "string"}, }, }, "required": ["questions"], }, } ] from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser chain = ( {"doc": lambda x: x.page_content} | ChatPromptTemplate.from_template( "Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\n\n{doc}" ) | ChatOpenAI(max_retries=0, model="gpt-4").bind( functions=functions, function_call={"name": "hypothetical_questions"} ) |
JsonKeyOutputFunctionsParser(key_name="questions")
langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-community') from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.schema.messages import AIMessage from langchain_community.llms.chatglm3 import ChatGLM3 template = """{question}""" prompt = PromptTemplate.from_template(template) endpoint_url = "http://127.0.0.1:8000/v1/chat/completions" messages = [
AIMessage(content="我将从美国到中国来旅游,出行前希望了解中国的城市")
langchain.schema.messages.AIMessage
get_ipython().run_cell_magic('writefile', 'discord_chats.txt', "talkingtower — 08/15/2023 11:10 AM\nLove music! Do you like jazz?\nreporterbob — 08/15/2023 9:27 PM\nYes! Jazz is fantastic. Ever heard this one?\nWebsite\nListen to classic jazz track...\n\ntalkingtower — Yesterday at 5:03 AM\nIndeed! Great choice. 🎷\nreporterbob — Yesterday at 5:23 AM\nThanks! How about some virtual sightseeing?\nWebsite\nVirtual tour of famous landmarks...\n\ntalkingtower — Today at 2:38 PM\nSounds fun! Let's explore.\nreporterbob — Today at 2:56 PM\nEnjoy the tour! See you around.\ntalkingtower — Today at 3:00 PM\nThank you! Goodbye! 👋\nreporterbob — Today at 3:02 PM\nFarewell! Happy exploring.\n") import logging import re from typing import Iterator, List from langchain_community.chat_loaders import base as chat_loaders from langchain_core.messages import BaseMessage, HumanMessage logger = logging.getLogger() class DiscordChatLoader(chat_loaders.BaseChatLoader): def __init__(self, path: str): """ Initialize the Discord chat loader. Args: path: Path to the exported Discord chat text file. """ self.path = path self._message_line_regex = re.compile( r"(.+?) — (\w{3,9} \d{1,2}(?:st|nd|rd|th)?(?:, \d{4})? \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa flags=re.DOTALL, ) def _load_single_chat_session_from_txt( self, file_path: str ) -> chat_loaders.ChatSession: """ Load a single chat session from a text file. Args: file_path: Path to the text file containing the chat messages. Returns: A `ChatSession` object containing the loaded chat messages. """ with open(file_path, "r", encoding="utf-8") as file: lines = file.readlines() results: List[BaseMessage] = [] current_sender = None current_timestamp = None current_content = [] for line in lines: if re.match( r".+? — (\d{2}/\d{2}/\d{4} \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa line, ): if current_sender and current_content: results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) current_sender, current_timestamp = line.split(" — ")[:2] current_content = [ line[len(current_sender) + len(current_timestamp) + 4 :].strip() ] elif re.match(r"\[\d{1,2}:\d{2} (?:AM|PM)\]", line.strip()): results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) current_timestamp = line.strip()[1:-1] current_content = [] else: current_content.append("\n" + line.strip()) if current_sender and current_content: results.append( HumanMessage( content="".join(current_content).strip(), additional_kwargs={ "sender": current_sender, "events": [{"message_time": current_timestamp}], }, ) ) return
chat_loaders.ChatSession(messages=results)
langchain_community.chat_loaders.base.ChatSession
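To satisfy BaseChatLoader the class also needs a lazy_load; a sketch consistent with the methods above, plus basic usage against the file written at the top of this row:

    def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
        """Yield the single chat session parsed from the configured file."""
        yield self._load_single_chat_session_from_txt(self.path)

loader = DiscordChatLoader(path="discord_chats.txt")
chat_sessions = loader.load()
chat_sessions[0]["messages"][:3]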
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wandb') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas') get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat') get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy') get_ipython().system('python -m spacy download en_core_web_sm') import os os.environ["WANDB_API_KEY"] = "" from datetime import datetime from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler from langchain_openai import OpenAI """Main function. This function is used to try the callback handler. Scenarios: 1. OpenAI LLM 2. Chain with multiple SubChains on multiple generations 3. Agent with Tools """ session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S") wandb_callback = WandbCallbackHandler( job_type="inference", project="langchain_callback_demo", group=f"minimal_{session_group}", name="llm", tags=["test"], ) callbacks = [StdOutCallbackHandler(), wandb_callback] llm = OpenAI(temperature=0, callbacks=callbacks) llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3) wandb_callback.flush_tracker(llm, name="simple_sequential") from langchain.chains import LLMChain from langchain.prompts import PromptTemplate template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title. Title: {title} Playwright: This is a synopsis for the above play:""" prompt_template = PromptTemplate(input_variables=["title"], template=template) synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks) test_prompts = [ { "title": "documentary about good video games that push the boundary of game design" }, {"title": "cocaine bear vs heroin wolf"}, {"title": "the best in class mlops tooling"}, ] synopsis_chain.apply(test_prompts) wandb_callback.flush_tracker(synopsis_chain, name="agent") from langchain.agents import AgentType, initialize_agent, load_tools tools =
load_tools(["serpapi", "llm-math"], llm=llm)
langchain.agents.load_tools
from langchain.pydantic_v1 import BaseModel, Field from langchain.tools import BaseTool, StructuredTool, tool @tool def search(query: str) -> str: """Look up things online.""" return "LangChain" print(search.name) print(search.description) print(search.args) @tool def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b print(multiply.name) print(multiply.description) print(multiply.args) class SearchInput(BaseModel): query: str = Field(description="should be a search query") @tool("search-tool", args_schema=SearchInput, return_direct=True) def search(query: str) -> str: """Look up things online.""" return "LangChain" print(search.name) print(search.description) print(search.args) print(search.return_direct) from typing import Optional, Type from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) class SearchInput(BaseModel): query: str = Field(description="should be a search query") class CalculatorInput(BaseModel): a: int = Field(description="first number") b: int = Field(description="second number") class CustomSearchTool(BaseTool): name = "custom_search" description = "useful for when you need to answer questions about current events" args_schema: Type[BaseModel] = SearchInput def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the tool.""" return "LangChain" async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("custom_search does not support async") class CustomCalculatorTool(BaseTool): name = "Calculator" description = "useful for when you need to answer questions about math" args_schema: Type[BaseModel] = CalculatorInput return_direct: bool = True def _run( self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the tool.""" return a * b async def _arun( self, a: int, b: int, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("Calculator does not support async") search = CustomSearchTool() print(search.name) print(search.description) print(search.args) multiply = CustomCalculatorTool() print(multiply.name) print(multiply.description) print(multiply.args) print(multiply.return_direct) def search_function(query: str): return "LangChain" search = StructuredTool.from_function( func=search_function, name="Search", description="useful for when you need to answer questions about current events", ) print(search.name) print(search.description) print(search.args) class CalculatorInput(BaseModel): a: int =
Field(description="first number")
langchain.pydantic_v1.Field
get_ipython().system(' pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet') from pprint import pprint from docugami import Docugami from docugami.lib.upload import upload_to_named_docset, wait_for_dgml DOCSET_NAME = "NTSB Aviation Incident Reports" FILE_PATHS = [ "/Users/tjaffri/ntsb/Report_CEN23LA277_192541.pdf", "/Users/tjaffri/ntsb/Report_CEN23LA338_192753.pdf", "/Users/tjaffri/ntsb/Report_CEN23LA363_192876.pdf", "/Users/tjaffri/ntsb/Report_CEN23LA394_192995.pdf", "/Users/tjaffri/ntsb/Report_ERA23LA114_106615.pdf", "/Users/tjaffri/ntsb/Report_WPR23LA254_192532.pdf", ] assert len(FILE_PATHS) > 5, "Please provide at least 6 files" dg_client = Docugami() dg_docs = upload_to_named_docset(dg_client, FILE_PATHS, DOCSET_NAME) dgml_paths = wait_for_dgml(dg_client, dg_docs) pprint(dgml_paths) from pathlib import Path from dgml_utils.segmentation import get_chunks_str dgml_path = dgml_paths[Path(FILE_PATHS[0]).name] with open(dgml_path, "r") as file: contents = file.read().encode("utf-8") chunks = get_chunks_str( contents, include_xml_tags=True, # Ensures Docugami XML semantic tags are included in the chunked output (set to False for text-only chunks and tables as Markdown) max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI. ) print(f"found {len(chunks)} chunks, here are the first few") for chunk in chunks[:10]: print(chunk.text) with open(dgml_path, "r") as file: contents = file.read().encode("utf-8") chunks = get_chunks_str( contents, include_xml_tags=False, # text-only chunks and tables as Markdown max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI. Ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them ) print(f"found {len(chunks)} chunks, here are the first few") for chunk in chunks[:10]: print(chunk.text) import requests dgml = requests.get( "https://raw.githubusercontent.com/docugami/dgml-utils/main/python/tests/test_data/article/Jane%20Doe.xml" ).text chunks = get_chunks_str(dgml, include_xml_tags=True) len(chunks) category_counts = {} for element in chunks: category = element.structure if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 category_counts table_elements = [c for c in chunks if "table" in c.structure.split()] print(f"There are {len(table_elements)} tables") text_elements = [c for c in chunks if "table" not in c.structure.split()] print(f"There are {len(text_elements)} text elements") for element in text_elements[:20]: print(element.text) print(table_elements[0].text) chunks_as_text = get_chunks_str(dgml, include_xml_tags=False) table_elements_as_text = [c for c in chunks_as_text if "table" in c.structure.split()] print(table_elements_as_text[0].text) from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_core.output_parsers import StrOutputParser from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() tables = [i.text for i in table_elements] table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores.chroma import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings def build_retriever(text_elements, tables, table_summaries): vectorstore = Chroma( collection_name="summaries", embedding_function=OpenAIEmbeddings() ) store =
InMemoryStore()
langchain.storage.InMemoryStore
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clickhouse-connect') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["OPENAI_API_BASE"] = getpass.getpass("OpenAI Base:") os.environ["MYSCALE_HOST"] = getpass.getpass("MyScale Host:") os.environ["MYSCALE_PORT"] = getpass.getpass("MyScale Port:") os.environ["MYSCALE_USERNAME"] = getpass.getpass("MyScale Username:") os.environ["MYSCALE_PASSWORD"] = getpass.getpass("MyScale Password:") from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import MyScale from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() for d in docs: d.metadata = {"some": "metadata"} docsearch =
MyScale.from_documents(docs, embeddings)
langchain_community.vectorstores.MyScale.from_documents
from langchain.memory import ConversationKGMemory from langchain_openai import OpenAI llm = OpenAI(temperature=0) memory = ConversationKGMemory(llm=llm) memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"}) memory.save_context({"input": "sam is a friend"}, {"output": "okay"}) memory.load_memory_variables({"input": "who is sam"}) memory = ConversationKGMemory(llm=llm, return_messages=True) memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"}) memory.save_context({"input": "sam is a friend"}, {"output": "okay"}) memory.load_memory_variables({"input": "who is sam"}) memory.get_current_entities("what's Sams favorite color?") memory.get_knowledge_triplets("her favorite color is red") llm = OpenAI(temperature=0) from langchain.chains import ConversationChain from langchain.prompts.prompt import PromptTemplate template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate. Relevant Information: {history} Conversation: Human: {input} AI:""" prompt =
PromptTemplate(input_variables=["history", "input"], template=template)
langchain.prompts.prompt.PromptTemplate
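Plugging the knowledge-graph memory and custom prompt into a ConversationChain, as in the original guide:

conversation_with_kg = ConversationChain(
    llm=llm, verbose=True, prompt=prompt, memory=ConversationKGMemory(llm=llm)
)
conversation_with_kg.predict(input="Hi, what's up?")
conversation_with_kg.predict(
    input="My name is James and I'm helping Will. He's an engineer."
)
conversation_with_kg.predict(input="What do you know about Will?")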
from langchain_community.document_loaders import WebBaseLoader loader = WebBaseLoader("https://www.espn.com/") data = loader.load() data """ import requests from bs4 import BeautifulSoup html_doc = requests.get("{INSERT_NEW_URL_HERE}") soup = BeautifulSoup(html_doc.text, 'html.parser') """ loader =
WebBaseLoader(["https://www.espn.com/", "https://google.com"])
langchain_community.document_loaders.WebBaseLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet docx2txt') from langchain_community.document_loaders import Docx2txtLoader loader = Docx2txtLoader("example_data/fake.docx") data = loader.load() data from langchain_community.document_loaders import UnstructuredWordDocumentLoader loader =
UnstructuredWordDocumentLoader("example_data/fake.docx")
langchain_community.document_loaders.UnstructuredWordDocumentLoader
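Unstructured loaders also accept mode="elements", which yields one Document per detected element (title, narrative text, and so on) instead of a single concatenated Document:

loader = UnstructuredWordDocumentLoader("example_data/fake.docx", mode="elements")
data = loader.load()
data[0]  # first element, with its category recorded in the metadata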
get_ipython().system('pip install -U openai langchain langchain-experimental') from langchain_core.messages import HumanMessage, SystemMessage from langchain_openai import ChatOpenAI chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256) chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": "What is this image showing"}, { "type": "image_url", "image_url": { "url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png", "detail": "auto", }, }, ] ) ] ) from langchain.agents.openai_assistant import OpenAIAssistantRunnable interpreter_assistant = OpenAIAssistantRunnable.create_assistant( name="langchain assistant", instructions="You are a personal math tutor. Write and run code to answer math questions.", tools=[{"type": "code_interpreter"}], model="gpt-4-1106-preview", ) output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"}) output get_ipython().system('pip install e2b duckduckgo-search') from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()] agent = OpenAIAssistantRunnable.create_assistant( name="langchain assistant e2b tool", instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.", tools=tools, model="gpt-4-1106-preview", as_agent=True, ) from langchain.agents import AgentExecutor agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"}) agent = OpenAIAssistantRunnable.create_assistant( name="langchain assistant e2b tool", instructions="You are a personal math tutor. Write and run code to answer math questions.", tools=tools, model="gpt-4-1106-preview", as_agent=True, ) from langchain_core.agents import AgentFinish def execute_agent(agent, tools, input): tool_map = {tool.name: tool for tool in tools} response = agent.invoke(input) while not isinstance(response, AgentFinish): tool_outputs = [] for action in response: tool_output = tool_map[action.tool].invoke(action.tool_input) print(action.tool, action.tool_input, tool_output, end="\n\n") tool_outputs.append( {"output": tool_output, "tool_call_id": action.tool_call_id} ) response = agent.invoke( { "tool_outputs": tool_outputs, "run_id": action.run_id, "thread_id": action.thread_id, } ) return response response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}) print(response.return_values["output"]) next_response = execute_agent( agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id} ) print(next_response.return_values["output"]) chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind( response_format={"type": "json_object"} ) output = chat.invoke( [ SystemMessage( content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list." ), HumanMessage( content="Google was founded in the USA, while Deepmind was founded in the UK" ), ] ) print(output.content) import json json.loads(output.content) chat = ChatOpenAI(model="gpt-3.5-turbo-1106") output = chat.generate( [ [ SystemMessage( content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list." 
), HumanMessage( content="Google was founded in the USA, while Deepmind was founded in the UK" ), ] ] ) print(output.llm_output) from typing import Literal from langchain.output_parsers.openai_tools import PydanticToolsParser from langchain.utils.openai_functions import convert_pydantic_to_openai_tool from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field class GetCurrentWeather(BaseModel): """Get the current weather in a location.""" location: str = Field(description="The city and state, e.g. San Francisco, CA") unit: Literal["celsius", "fahrenheit"] = Field( default="fahrenheit", description="The temperature unit, default to fahrenheit" ) prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful assistant"), ("user", "{input}")] ) model = ChatOpenAI(model="gpt-3.5-turbo-1106").bind( tools=[convert_pydantic_to_openai_tool(GetCurrentWeather)] ) chain = prompt | model |
PydanticToolsParser(tools=[GetCurrentWeather])
langchain.output_parsers.openai_tools.PydanticToolsParser
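Invoking the composed chain returns parsed pydantic objects rather than raw tool-call JSON; a usage sketch (the output shown is illustrative):

chain.invoke({"input": "what's the weather in NYC today?"})
# -> [GetCurrentWeather(location='New York, NY', unit='fahrenheit')]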
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser() for txt in chain.stream({"input": "What's your name?"}): print(txt, end="") prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert coding AI. Respond only in valid python; no narration whatsoever.", ), ("user", "{input}"), ] ) chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser() for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}): print(txt, end="") from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="nemotron_steerlm_8b") complex_result = llm.invoke( "What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0} ) print("Un-creative\n") print(complex_result.content) print("\n\nCreative\n") creative_result = llm.invoke( "What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9} ) print(creative_result.content) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = ( prompt | ChatNVIDIA(model="nemotron_steerlm_8b").bind( labels={"creativity": 9, "complexity": 0, "verbosity": 9} ) | StrOutputParser() ) for txt in chain.stream({"input": "Why is a PB&J?"}): print(txt, end="") import IPython import requests image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image image_content = requests.get(image_url).content IPython.display.Image(image_content) from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="playground_neva_22b") from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ], labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0}, ) import IPython import requests image_url = "https://picsum.photos/seed/kitten/300/200" image_content = requests.get(image_url).content IPython.display.Image(image_content) import 
base64 from langchain_core.messages import HumanMessage b64_string = base64.b64encode(image_content).decode("utf-8") llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, { "type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}, }, ] ) ] ) base64_with_mime_type = f"data:image/png;base64,{b64_string}" llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />') from langchain_nvidia_ai_endpoints import ChatNVIDIA kosmos = ChatNVIDIA(model="kosmos_2") from langchain_core.messages import HumanMessage def drop_streaming_key(d): """Takes in payload dictionary, outputs new payload dictionary""" if "stream" in d: d.pop("stream") return d kosmos = ChatNVIDIA(model="kosmos_2") kosmos.client.payload_fn = drop_streaming_key kosmos.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) import base64 from io import BytesIO from PIL import Image img_gen = ChatNVIDIA(model="sdxl_turbo") def to_sdxl_payload(d): if d: d = {"prompt": d.get("messages", [{}])[0].get("content")} d["inference_steps"] = 4 ## why not add another argument? return d img_gen.client.payload_fn = to_sdxl_payload def to_pil_img(d): return Image.open(BytesIO(base64.b64decode(d))) (img_gen | StrOutputParser() | to_pil_img).invoke("white cat playing") from langchain_core.messages import ChatMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [ ChatMessage( role="context", content="Parrots and Cats have signed the peace accord." ), ("user", "{input}"), ] ) llm = ChatNVIDIA(model="nemotron_qa_8b") chain = prompt | llm | StrOutputParser() chain.invoke({"input": "What was signed?"}) get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') from langchain.chains import ConversationChain from langchain.memory import ConversationBufferMemory chat = ChatNVIDIA(model="mixtral_8x7b", temperature=0.1, max_tokens=100, top_p=1.0) conversation = ConversationChain(llm=chat, memory=
ConversationBufferMemory()
langchain.memory.ConversationBufferMemory
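With the buffer memory attached, successive calls share the accumulated history; a minimal usage sketch:

conversation.predict(input="Hi there!")
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")  # sees the first turn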
from typing import Callable, List import tenacity from langchain.output_parsers import RegexParser from langchain.prompts import PromptTemplate from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_openai import ChatOpenAI class DialogueAgent: def __init__( self, name: str, system_message: SystemMessage, model: ChatOpenAI, ) -> None: self.name = name self.system_message = system_message self.model = model self.prefix = f"{self.name}: " self.reset() def reset(self): self.message_history = ["Here is the conversation so far."] def send(self) -> str: """ Applies the chatmodel to the message history and returns the message string """ message = self.model( [ self.system_message, HumanMessage(content="\n".join(self.message_history + [self.prefix])), ] ) return message.content def receive(self, name: str, message: str) -> None: """ Concatenates {message} spoken by {name} into message history """ self.message_history.append(f"{name}: {message}") class DialogueSimulator: def __init__( self, agents: List[DialogueAgent], selection_function: Callable[[int, List[DialogueAgent]], int], ) -> None: self.agents = agents self._step = 0 self.select_next_speaker = selection_function def reset(self): for agent in self.agents: agent.reset() def inject(self, name: str, message: str): """ Initiates the conversation with a {message} from {name} """ for agent in self.agents: agent.receive(name, message) self._step += 1 def step(self) -> tuple[str, str]: speaker_idx = self.select_next_speaker(self._step, self.agents) speaker = self.agents[speaker_idx] message = speaker.send() for receiver in self.agents: receiver.receive(speaker.name, message) self._step += 1 return speaker.name, message class BiddingDialogueAgent(DialogueAgent): def __init__( self, name, system_message: SystemMessage, bidding_template: PromptTemplate, model: ChatOpenAI, ) -> None: super().__init__(name, system_message, model) self.bidding_template = bidding_template def bid(self) -> str: """ Asks the chat model to output a bid to speak """ prompt = PromptTemplate( input_variables=["message_history", "recent_message"], template=self.bidding_template, ).format( message_history="\n".join(self.message_history), recent_message=self.message_history[-1], ) bid_string = self.model([
SystemMessage(content=prompt)
langchain.schema.SystemMessage
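The RegexParser and tenacity imports at the top of this snippet are presumably used to parse and retry the bids; a sketch under the assumption that agents are instructed to wrap their bid in angle brackets (the regex and retry policy are illustrative):

bid_parser = RegexParser(
    regex=r"<(\d+)>", output_keys=["bid"], default_output_key="bid"
)

@tenacity.retry(
    stop=tenacity.stop_after_attempt(2),
    wait=tenacity.wait_none(),  # no pause between retries
    retry=tenacity.retry_if_exception_type(ValueError),
    retry_error_callback=lambda retry_state: 0,  # fall back to a bid of 0
)
def ask_for_bid(agent) -> int:
    """Ask a BiddingDialogueAgent for a bid, retrying if the output cannot be parsed."""
    bid_string = agent.bid()
    return int(bid_parser.parse(bid_string)["bid"])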
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet') import os from langchain_community.document_loaders import DocugamiLoader DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY") docset_id = "26xpy3aes7xp" document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"] loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids) chunks = loader.load() len(chunks) loader.min_text_length = 64 loader.include_xml_tags = True chunks = loader.load() for chunk in chunks[:5]: print(chunk) get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib') loader = DocugamiLoader(docset_id="zo954yqy53wp") chunks = loader.load() for chunk in chunks: stripped_metadata = chunk.metadata.copy() for key in chunk.metadata: if key not in ["name", "xpath", "id", "structure"]: del stripped_metadata[key] chunk.metadata = stripped_metadata print(len(chunks)) from langchain.chains import RetrievalQA from langchain_community.vectorstores.chroma import Chroma from langchain_openai import OpenAI, OpenAIEmbeddings embedding = OpenAIEmbeddings() vectordb = Chroma.from_documents(documents=chunks, embedding=embedding) retriever = vectordb.as_retriever() qa_chain = RetrievalQA.from_chain_type( llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True ) qa_chain("What can tenants do with signage on their properties?") chain_response = qa_chain("What is rentable area for the property owned by DHA Group?") chain_response["result"] # correct answer should be 13,500 sq ft chain_response["source_documents"] loader = DocugamiLoader(docset_id="zo954yqy53wp") loader.include_xml_tags = ( True # for additional semantics from the Docugami knowledge graph ) chunks = loader.load() print(chunks[0].metadata) get_ipython().system('poetry run pip install --upgrade lark --quiet') from langchain.chains.query_constructor.schema import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_community.vectorstores.chroma import Chroma EXCLUDE_KEYS = ["id", "xpath", "structure"] metadata_field_info = [ AttributeInfo( name=key, description=f"The {key} for this chunk", type="string", ) for key in chunks[0].metadata if key.lower() not in EXCLUDE_KEYS ] document_content_description = "Contents of this chunk" llm = OpenAI(temperature=0) vectordb =
Chroma.from_documents(documents=chunks, embedding=embedding)
langchain_community.vectorstores.chroma.Chroma.from_documents
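With the vector store and metadata field info in place, the self-query retriever is typically built with the from_llm constructor; a sketch:

retriever = SelfQueryRetriever.from_llm(
    llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
retriever.get_relevant_documents(
    "What is rentable area for the property owned by DHA Group?"
)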
get_ipython().run_line_magic('pip', 'install --upgrade --quiet titan-iris') from langchain_community.llms import TitanTakeoff llm = TitanTakeoff( base_url="http://localhost:8000", generate_max_length=128, temperature=1.0 ) prompt = "What is the largest planet in the solar system?" llm(prompt) from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler llm = TitanTakeoff( callback_manager=CallbackManager([
StreamingStdOutCallbackHandler()
langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler
import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pypdf pymongo langchain-openai tiktoken') import getpass MONGODB_ATLAS_CLUSTER_URI = getpass.getpass("MongoDB Atlas Cluster URI:") from pymongo import MongoClient client = MongoClient(MONGODB_ATLAS_CLUSTER_URI) DB_NAME = "langchain_db" COLLECTION_NAME = "test" ATLAS_VECTOR_SEARCH_INDEX_NAME = "index_name" MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME] from langchain_community.document_loaders import PyPDFLoader loader =
PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf")
langchain_community.document_loaders.PyPDFLoader
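The loaded PDF pages are then typically split and indexed into the Atlas collection; a sketch, assuming the vector search index named above has been created in Atlas:

from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

data = loader.load()
docs = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=150
).split_documents(data)
vector_search = MongoDBAtlasVectorSearch.from_documents(
    documents=docs,
    embedding=OpenAIEmbeddings(),
    collection=MONGODB_COLLECTION,
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)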
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
langchain_nvidia_ai_endpoints.ChatNVIDIA.get_available_models
from langchain_community.tools.edenai import ( EdenAiExplicitImageTool, EdenAiObjectDetectionTool, EdenAiParsingIDTool, EdenAiParsingInvoiceTool, EdenAiSpeechToTextTool, EdenAiTextModerationTool, EdenAiTextToSpeechTool, ) from langchain.agents import AgentType, initialize_agent from langchain_community.llms import EdenAI llm = EdenAI( feature="text", provider="openai", params={"temperature": 0.2, "max_tokens": 250} ) tools = [ EdenAiTextModerationTool(providers=["openai"], language="en"), EdenAiObjectDetectionTool(providers=["google", "api4ai"]), EdenAiTextToSpeechTool(providers=["amazon"], language="en", voice="MALE"),
EdenAiExplicitImageTool(providers=["amazon", "google"])
langchain_community.tools.edenai.EdenAiExplicitImageTool
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() os.environ["ANTHROPIC_API_KEY"] = getpass.getpass() from langchain_community.retrievers import WikipediaRetriever from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000) prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}", ), ("human", "{question}"), ] ) prompt.pretty_print() from operator import itemgetter from typing import List from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import ( RunnableLambda, RunnableParallel, RunnablePassthrough, ) def format_docs(docs: List[Document]) -> str: """Convert Documents to a single string.""" formatted = [ f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}" for doc in docs ] return "\n\n" + "\n\n".join(formatted) format = itemgetter("docs") | RunnableLambda(format_docs) answer = prompt | llm | StrOutputParser() chain = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format) .assign(answer=answer) .pick(["answer", "docs"]) ) chain.invoke("How fast are cheetahs?") from langchain_core.pydantic_v1 import BaseModel, Field class cited_answer(BaseModel): """Answer the user question based only on the given sources, and cite the sources used.""" answer: str = Field( ..., description="The answer to the user question, which is based only on the given sources.", ) citations: List[int] = Field( ..., description="The integer IDs of the SPECIFIC sources which justify the answer.", ) llm_with_tool = llm.bind_tools( [cited_answer], tool_choice="cited_answer", ) example_q = """What is Brian's height? 
Source: 1 Information: Suzy is 6'2" Source: 2 Information: Jeremiah is blonde Source: 3 Information: Brian is 3 inches shorter than Suzy""" llm_with_tool.invoke(example_q) from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True) (llm_with_tool | output_parser).invoke(example_q) def format_docs_with_id(docs: List[Document]) -> str: formatted = [ f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}" for i, doc in enumerate(docs) ] return "\n\n" + "\n\n".join(formatted) format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id) answer_1 = prompt | llm_with_tool | output_parser chain_1 = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format_1) .assign(cited_answer=answer_1) .pick(["cited_answer", "docs"]) ) chain_1.invoke("How fast are cheetahs?") class Citation(BaseModel): source_id: int = Field( ..., description="The integer ID of a SPECIFIC source which justifies the answer.", ) quote: str = Field( ..., description="The VERBATIM quote from the specified source that justifies the answer.", ) class quoted_answer(BaseModel): """Answer the user question based only on the given sources, and cite the sources used.""" answer: str = Field( ..., description="The answer to the user question, which is based only on the given sources.", ) citations: List[Citation] = Field( ..., description="Citations from the given sources that justify the answer." ) output_parser_2 =
JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser
get_ipython().system('pip3 install cerebrium') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import CerebriumAI os.environ["CEREBRIUMAI_API_KEY"] = "YOUR_KEY_HERE" llm = CerebriumAI(endpoint_url="YOUR ENDPOINT URL HERE") template = """Question: {question} Answer: Let's think step by step.""" prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
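The template and remote LLM then combine in the usual LLMChain pattern (the question is illustrative):

llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)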
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dingodb') get_ipython().run_line_magic('pip', 'install --upgrade --quiet git+https://git@github.com/dingodb/pydingo.git') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Dingo from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter documents = TextLoader("../../state_of_the_union.txt").load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever() docs = retriever.get_relevant_documents( "What did the president say about Ketanji Brown Jackson" ) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import LLMChainExtractor from langchain_openai import OpenAI llm = OpenAI(temperature=0) compressor = LLMChainExtractor.from_llm(llm) compression_retriever = ContextualCompressionRetriever( base_compressor=compressor, base_retriever=retriever ) compressed_docs = compression_retriever.get_relevant_documents( "What did the president say about Ketanji Jackson Brown" ) pretty_print_docs(compressed_docs) from langchain.retrievers.document_compressors import LLMChainFilter _filter = LLMChainFilter.from_llm(llm) compression_retriever = ContextualCompressionRetriever( base_compressor=_filter, base_retriever=retriever ) compressed_docs = compression_retriever.get_relevant_documents( "What did the president say about Ketanji Jackson Brown" ) pretty_print_docs(compressed_docs) from langchain.retrievers.document_compressors import EmbeddingsFilter from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76) compression_retriever = ContextualCompressionRetriever( base_compressor=embeddings_filter, base_retriever=retriever ) compressed_docs = compression_retriever.get_relevant_documents( "What did the president say about Ketanji Jackson Brown" ) pretty_print_docs(compressed_docs) from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain_community.document_transformers import EmbeddingsRedundantFilter from langchain_text_splitters import CharacterTextSplitter splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ") redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter =
EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
langchain.retrievers.document_compressors.EmbeddingsFilter
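The splitter and the two filters are then typically chained into one pipeline compressor and wrapped around the base retriever; a sketch:

pipeline_compressor = DocumentCompressorPipeline(
    transformers=[splitter, redundant_filter, relevant_filter]
)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=pipeline_compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)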
import os import chromadb from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain.retrievers.merger_retriever import MergerRetriever from langchain_community.document_transformers import ( EmbeddingsClusteringFilter, EmbeddingsRedundantFilter, ) from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings all_mini = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") multi_qa_mini = HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-dot-v1") filter_embeddings = OpenAIEmbeddings() ABS_PATH = os.path.dirname(os.path.abspath(__file__)) DB_DIR = os.path.join(ABS_PATH, "db") client_settings = chromadb.config.Settings( is_persistent=True, persist_directory=DB_DIR, anonymized_telemetry=False, ) db_all = Chroma( collection_name="project_store_all", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=all_mini, ) db_multi_qa = Chroma( collection_name="project_store_multi", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=multi_qa_mini, ) retriever_all = db_all.as_retriever( search_type="similarity", search_kwargs={"k": 5, "include_metadata": True} ) retriever_multi_qa = db_multi_qa.as_retriever( search_type="mmr", search_kwargs={"k": 5, "include_metadata": True} ) lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa]) filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings) pipeline =
DocumentCompressorPipeline(transformers=[filter])
langchain.retrievers.document_compressors.DocumentCompressorPipeline
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence-transformers > /dev/null') from langchain.chains import LLMChain, StuffDocumentsChain from langchain.prompts import PromptTemplate from langchain_community.document_transformers import ( LongContextReorder, ) from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Chroma from langchain_openai import OpenAI embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") texts = [ "Basketball is a great sport.", "Fly me to the moon is one of my favourite songs.", "The Celtics are my favourite team.", "This is a document about the Boston Celtics", "I simply love going to the movies", "The Boston Celtics won the game by 20 points", "This is just a random text.", "Elden Ring is one of the best games in the last 15 years.", "L. Kornet is one of the best Celtics players.", "Larry Bird was an iconic NBA player.", ] retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( search_kwargs={"k": 10} ) query = "What can you tell me about the Celtics?" docs = retriever.get_relevant_documents(query) docs reordering =
LongContextReorder()
langchain_community.document_transformers.LongContextReorder
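The reorderer then moves the most relevant documents to the beginning and end of the list, countering the lost-in-the-middle effect:

reordered_docs = reordering.transform_documents(docs)
reordered_docs  # relevant Celtics documents now sit at the head and tail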
import runhouse as rh from langchain_community.embeddings import ( SelfHostedEmbeddings, SelfHostedHuggingFaceEmbeddings, SelfHostedHuggingFaceInstructEmbeddings, ) gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False) embeddings = SelfHostedHuggingFaceEmbeddings(hardware=gpu) text = "This is a test document." query_result = embeddings.embed_query(text) embeddings =
SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
langchain_community.embeddings.SelfHostedHuggingFaceInstructEmbeddings
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn') from langchain_community.retrievers import TFIDFRetriever retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"]) from langchain_core.documents import Document retriever = TFIDFRetriever.from_documents( [ Document(page_content="foo"), Document(page_content="bar"), Document(page_content="world"), Document(page_content="hello"), Document(page_content="foo bar"), ] ) result = retriever.get_relevant_documents("foo") result retriever.save_local("testing.pkl") retriever_copy =
TFIDFRetriever.load_local("testing.pkl")
langchain_community.retrievers.TFIDFRetriever.load_local
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() redis_url = "redis://localhost:6379" redis_url = "redis://:secret@redis:7379/2" redis_url = "redis://joe:secret@redis/0" redis_url = "redis+sentinel://localhost:26379" redis_url = "redis+sentinel://joe:secret@redis" redis_url = "redis+sentinel://redis:26379/zone-1/2" redis_url = "rediss://localhost:6379" redis_url = "rediss+sentinel://localhost" metadata = [ { "user": "john", "age": 18, "job": "engineer", "credit_score": "high", }, { "user": "derrick", "age": 45, "job": "doctor", "credit_score": "low", }, { "user": "nancy", "age": 94, "job": "doctor", "credit_score": "high", }, { "user": "tyler", "age": 100, "job": "engineer", "credit_score": "high", }, { "user": "joe", "age": 35, "job": "dentist", "credit_score": "medium", }, ] texts = ["foo", "foo", "foo", "bar", "bar"] from langchain_community.vectorstores.redis import Redis rds = Redis.from_texts( texts, embeddings, metadatas=metadata, redis_url="redis://localhost:6379", index_name="users", ) rds.index_name get_ipython().system('rvl index listall') get_ipython().system('rvl index info -i users') get_ipython().system('rvl stats -i users') results = rds.similarity_search("foo") print(results[0].page_content) results = rds.similarity_search("foo", k=3) meta = results[1].metadata print("Key of the document in Redis: ", meta.pop("id")) print("Metadata of the document: ", meta) results = rds.similarity_search_with_score("foo", k=5) for result in results: print(f"Content: {result[0].page_content} --- Score: {result[1]}") results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1) for result in results: print(f"Content: {result[0].page_content} --- Score: {result[1]}") results = rds.similarity_search_with_relevance_scores("foo", k=5) for result in results: print(f"Content: {result[0].page_content} --- Similarity: {result[1]}") results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9) for result in results: print(f"Content: {result[0].page_content} --- Similarity: {result[1]}") new_document = ["baz"] new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}] rds.add_texts(new_document, new_metadata) results = rds.similarity_search("baz", k=3) print(results[0].metadata) results = rds.max_marginal_relevance_search("foo") results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1) rds.write_schema("redis_schema.yaml") new_rds = Redis.from_existing_index( embeddings, index_name="users", redis_url="redis://localhost:6379", schema="redis_schema.yaml", ) results = new_rds.similarity_search("foo", k=3) print(results[0].metadata) new_rds.schema == rds.schema index_schema = { "tag": [{"name": "credit_score"}], "text": [{"name": "user"}, {"name": "job"}], "numeric": [{"name": "age"}], } rds, keys = Redis.from_texts_return_keys( texts, embeddings, metadatas=metadata, redis_url="redis://localhost:6379", index_name="users_modified", index_schema=index_schema, # pass in the new index schema ) from langchain_community.vectorstores.redis import RedisText is_engineer = RedisText("job") == "engineer" results = rds.similarity_search("foo", k=3, filter=is_engineer) print("Job:", results[0].metadata["job"]) print("Engineers in the dataset:", len(results)) starts_with_doc = RedisText("job") % "doc*" results =
rds.similarity_search("foo", k=3, filter=starts_with_doc) for result in results: print("Job:", result.metadata["job"]) print("Jobs in dataset that start with 'doc':", len(results)) from langchain_community.vectorstores.redis import RedisNum is_over_18 =
RedisNum("age")
langchain_community.vectorstores.redis.RedisNum
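RedisNum supports the usual comparison operators for numeric filters; a sketch completing the truncated line above:

is_over_18 = RedisNum("age") > 18
results = rds.similarity_search("foo", k=3, filter=is_over_18)
for result in results:
    print("User:", result.metadata["user"], "is", result.metadata["age"])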
from langchain.prompts import ChatMessagePromptTemplate prompt = "May the {subject} be with you" chat_message_prompt = ChatMessagePromptTemplate.from_template( role="Jedi", template=prompt ) chat_message_prompt.format(subject="force") from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, ) human_prompt = "Summarize our conversation so far in {word_count} words." human_message_template =
HumanMessagePromptTemplate.from_template(human_prompt)
langchain.prompts.HumanMessagePromptTemplate.from_template
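The human message template is normally combined with a MessagesPlaceholder that injects the running conversation at format time; a sketch:

chat_prompt = ChatPromptTemplate.from_messages(
    [MessagesPlaceholder(variable_name="conversation"), human_message_template]
)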
from getpass import getpass from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader DOMAIN = input("larksuite domain") ACCESS_TOKEN = getpass("larksuite tenant_access_token or user_access_token") DOCUMENT_ID = input("larksuite document id") from pprint import pprint larksuite_loader = LarkSuiteDocLoader(DOMAIN, ACCESS_TOKEN, DOCUMENT_ID) docs = larksuite_loader.load() pprint(docs) from langchain.chains.summarize import load_summarize_chain from langchain_community.llms.fake import FakeListLLM llm = FakeListLLM(responses=["Fake summary of the LarkSuite document."])  # FakeListLLM requires a list of canned responses chain =
load_summarize_chain(llm, chain_type="map_reduce")
langchain.chains.summarize.load_summarize_chain
import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from typing import List, Tuple from dotenv import load_dotenv load_dotenv() from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Lantern from langchain_core.documents import Document from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
OpenAIEmbeddings()
langchain_community.embeddings.OpenAIEmbeddings
from typing import List from langchain.output_parsers import YamlOutputParser from langchain.prompts import PromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain_openai import ChatOpenAI model = ChatOpenAI(temperature=0) class Joke(BaseModel): setup: str = Field(description="question to set up a joke") punchline: str =
Field(description="answer to resolve the joke")
langchain_core.pydantic_v1.Field
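The imported YamlOutputParser is presumably wired into the chain with format instructions in the prompt; a sketch:

parser = YamlOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})  # returns a parsed Joke instance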
from langchain_community.document_loaders import AirbyteJSONLoader get_ipython().system('ls /tmp/airbyte_local/json_data/') loader =
AirbyteJSONLoader("/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl")
langchain_community.document_loaders.AirbyteJSONLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langsmith langchainhub --quiet') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai tiktoken pandas duckduckgo-search --quiet') import os from uuid import uuid4 unique_id = uuid4().hex[0:8] os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_PROJECT"] = f"Tracing Walkthrough - {unique_id}" os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com" os.environ["LANGCHAIN_API_KEY"] = "<YOUR-API-KEY>" # Update to your API key os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>" from langsmith import Client client = Client() from langchain import hub from langchain.agents import AgentExecutor from langchain.agents.format_scratchpad.openai_tools import ( format_to_openai_tool_messages, ) from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser from langchain_community.tools import DuckDuckGoSearchResults from langchain_openai import ChatOpenAI prompt = hub.pull("wfh/langsmith-agent-prompt:5d466cbc") llm = ChatOpenAI( model="gpt-3.5-turbo-16k", temperature=0, ) tools = [ DuckDuckGoSearchResults( name="duck_duck_go" ), # General internet search using DuckDuckGo ] llm_with_tools = llm.bind_tools(tools) runnable_agent = ( { "input": lambda x: x["input"], "agent_scratchpad": lambda x: format_to_openai_tool_messages( x["intermediate_steps"] ), } | prompt | llm_with_tools |
OpenAIToolsAgentOutputParser()
langchain.agents.output_parsers.openai_tools.OpenAIToolsAgentOutputParser
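The runnable agent is then wrapped in an AgentExecutor so each run is traced to the LangSmith project configured above; a sketch (the input is illustrative):

agent_executor = AgentExecutor(
    agent=runnable_agent,
    tools=tools,
    handle_parsing_errors=True,  # tolerate occasional malformed tool calls
)
agent_executor.invoke({"input": "What is LangChain?"})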
REGION = "us-central1" # @param {type:"string"} INSTANCE = "test-instance" # @param {type:"string"} DATABASE = "test" # @param {type:"string"} TABLE_NAME = "test-default" # @param {type:"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-cloud-sql-mysql') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable sqladmin.googleapis.com') from langchain_google_cloud_sql_mysql import MySQLEngine engine = MySQLEngine.from_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE ) engine.init_document_table(TABLE_NAME, overwrite_existing=True) from langchain_core.documents import Document from langchain_google_cloud_sql_mysql import MySQLDocumentSaver test_docs = [ Document( page_content="Apple Granny Smith 150 0.99 1", metadata={"fruit_id": 1}, ), Document( page_content="Banana Cavendish 200 0.59 0", metadata={"fruit_id": 2}, ), Document( page_content="Orange Navel 80 1.29 1", metadata={"fruit_id": 3}, ), ] saver = MySQLDocumentSaver(engine=engine, table_name=TABLE_NAME) saver.add_documents(test_docs) from langchain_google_cloud_sql_mysql import MySQLLoader loader = MySQLLoader(engine=engine, table_name=TABLE_NAME) docs = loader.lazy_load() for doc in docs: print("Loaded documents:", doc) from langchain_google_cloud_sql_mysql import MySQLLoader loader =
MySQLLoader( engine=engine, query=f"select * from `{TABLE_NAME}` where JSON_EXTRACT(langchain_metadata, '$.fruit_id')
langchain_google_cloud_sql_mysql.MySQLLoader
from langchain.agents import AgentType, initialize_agent from langchain.chains import LLMMathChain from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.tools import Tool from langchain_openai import ChatOpenAI get_ipython().run_line_magic('pip', 'install --upgrade --quiet numexpr') llm = ChatOpenAI(temperature=0, model="gpt-4") llm_math_chain =
LLMMathChain.from_llm(llm=llm, verbose=True)
langchain.chains.LLMMathChain.from_llm
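The math chain is commonly exposed to an agent as a calculator Tool; a sketch (tool name and description are illustrative):

tools = [
    Tool.from_function(
        func=llm_math_chain.run,
        name="Calculator",
        description="useful for when you need to answer questions about math",
    )
]
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 551368 divided by 82?")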
get_ipython().run_line_magic('pip', 'install --upgrade --quiet transformers huggingface_hub > /dev/null') from langchain.agents import load_huggingface_tool tool =
load_huggingface_tool("lysandre/hf-model-downloads")
langchain.agents.load_huggingface_tool
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer') import promptlayer # Don't forget this 🍰 from langchain.callbacks import PromptLayerCallbackHandler from langchain.schema import ( HumanMessage, ) from langchain_openai import ChatOpenAI chat_llm = ChatOpenAI( temperature=0, callbacks=[
PromptLayerCallbackHandler(pl_tags=["chatopenai"])
langchain.callbacks.PromptLayerCallbackHandler
from langchain_community.document_loaders import UnstructuredURLLoader urls = [ "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023", "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023", ] loader =
UnstructuredURLLoader(urls=urls)
langchain_community.document_loaders.UnstructuredURLLoader
from langchain_experimental.pal_chain import PALChain from langchain_openai import OpenAI llm = OpenAI(temperature=0, max_tokens=512) pal_chain = PALChain.from_math_prompt(llm, verbose=True) question = "Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?" pal_chain.run(question) pal_chain =
PALChain.from_colored_object_prompt(llm, verbose=True)
langchain_experimental.pal_chain.PALChain.from_colored_object_prompt
from langchain.chains import LLMSummarizationCheckerChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2) text = """ Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST): • In 2023, The JWST spotted a number of galaxies nicknamed "green peas." They were given this name because they are small, round, and green, like peas. • The telescope captured images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us. • JWST took the very first pictures of a planet outside of our own solar system. These distant worlds are called "exoplanets." Exo means "from outside." These discoveries can spark a child's imagination about the infinite wonders of the universe.""" checker_chain.run(text) from langchain.chains import LLMSummarizationCheckerChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3) text = "The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is one of five oceans in the world, alongside the Pacific Ocean, Atlantic Ocean, Indian Ocean, and the Southern Ocean. It is the smallest of the five oceans and is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the island of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Norwegian Sea." checker_chain.run(text) from langchain.chains import LLMSummarizationCheckerChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) checker_chain =
LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)
langchain.chains.LLMSummarizationCheckerChain.from_llm
from langchain_community.utilities.dataforseo_api_search import DataForSeoAPIWrapper import os os.environ["DATAFORSEO_LOGIN"] = "your_api_access_username" os.environ["DATAFORSEO_PASSWORD"] = "your_api_access_password" wrapper =
DataForSeoAPIWrapper()
langchain_community.utilities.dataforseo_api_search.DataForSeoAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas') ORG_ID = "..." import getpass import os from langchain.chains import RetrievalQA from langchain.vectorstores.deeplake import DeepLake from langchain_openai import OpenAIChat, OpenAIEmbeddings os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ") os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass( "Enter your ActiveLoop API token: " ) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens" token = os.getenv("ACTIVELOOP_TOKEN") openai_embeddings = OpenAIEmbeddings() db = DeepLake( dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop embedding=openai_embeddings, runtime={"tensor_db": True}, token=token, read_only=False, ) from urllib.parse import urljoin import requests from bs4 import BeautifulSoup def get_all_links(url): response = requests.get(url) if response.status_code != 200: print(f"Failed to retrieve the page: {url}") return [] soup = BeautifulSoup(response.content, "html.parser") links = [ urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"] ] return links base_url = "https://docs.deeplake.ai/en/latest/" all_links = get_all_links(base_url) from langchain.document_loaders import AsyncHtmlLoader loader = AsyncHtmlLoader(all_links) docs = loader.load() from langchain.document_transformers import Html2TextTransformer html2text = Html2TextTransformer() docs_transformed = html2text.transform_documents(docs) from langchain_text_splitters import RecursiveCharacterTextSplitter chunk_size = 4096 docs_new = [] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, ) for doc in docs_transformed: if len(doc.page_content) < chunk_size: docs_new.append(doc) else: docs = text_splitter.create_documents([doc.page_content]) docs_new.extend(docs) docs = db.add_documents(docs_new) from typing import List from langchain.chains.openai_functions import ( create_structured_output_chain, ) from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"] ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"] llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) class Questions(BaseModel): """Identifying information about a person.""" question: str = Field(..., description="Questions about text") prompt_msgs = [ SystemMessage( content="You are a world class expert for generating questions based on provided context. \ You make sure the question can be answered by the text." ), HumanMessagePromptTemplate.from_template( "Use the given text to generate a question from the following input: {input}" ), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain =
create_structured_output_chain(Questions, llm, prompt, verbose=True)
langchain.chains.openai_functions.create_structured_output_chain
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain_community.utilities import GoogleSearchAPIWrapper from langchain_openai import OpenAI template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory ) search =
GoogleSearchAPIWrapper()
langchain_community.utilities.GoogleSearchAPIWrapper
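The search wrapper and the read-only summary chain are then typically exposed as agent tools; a sketch (descriptions are illustrative):

tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events",
    ),
    Tool(
        name="Summary",
        func=summary_chain.run,
        description="useful for when you summarize a conversation; input is who will read the summary",
    ),
]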
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-firestore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable firestore.googleapis.com') from langchain_core.documents.base import Document from langchain_google_firestore import FirestoreSaver saver = FirestoreSaver() data = [Document(page_content="Hello, World!")] saver.upsert_documents(data) saver =
FirestoreSaver("Collection")
langchain_google_firestore.FirestoreSaver
from langchain.chains import LLMSummarizationCheckerChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2) text = """ Your 9-year old might like these recent discoveries made by The James Webb Space Telescope (JWST): • In 2023, The JWST spotted a number of galaxies nicknamed "green peas." They were given this name because they are small, round, and green, like peas. • The telescope captured images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us. • JWST took the very first pictures of a planet outside of our own solar system. These distant worlds are called "exoplanets." Exo means "from outside." These discoveries can spark a child's imagination about the infinite wonders of the universe.""" checker_chain.run(text) from langchain.chains import LLMSummarizationCheckerChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) checker_chain =
LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)
langchain.chains.LLMSummarizationCheckerChain.from_llm
from langchain.output_parsers import XMLOutputParser from langchain.prompts import PromptTemplate from langchain_community.chat_models import ChatAnthropic model =
ChatAnthropic(model="claude-2", max_tokens_to_sample=512, temperature=0.1)
langchain_community.chat_models.ChatAnthropic
import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db =
FAISS.from_documents(docs, embeddings)
langchain_community.vectorstores.FAISS.from_documents
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() os.environ["ANTHROPIC_API_KEY"] = getpass.getpass() from langchain_community.retrievers import WikipediaRetriever from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000) prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}", ), ("human", "{question}"), ] ) prompt.pretty_print() from operator import itemgetter from typing import List from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import ( RunnableLambda, RunnableParallel, RunnablePassthrough, ) def format_docs(docs: List[Document]) -> str: """Convert Documents to a single string.""" formatted = [ f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}" for doc in docs ] return "\n\n" + "\n\n".join(formatted) format = itemgetter("docs") | RunnableLambda(format_docs) answer = prompt | llm | StrOutputParser() chain = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format) .assign(answer=answer) .pick(["answer", "docs"]) ) chain.invoke("How fast are cheetahs?") from langchain_core.pydantic_v1 import BaseModel, Field class cited_answer(BaseModel): """Answer the user question based only on the given sources, and cite the sources used.""" answer: str = Field( ..., description="The answer to the user question, which is based only on the given sources.", ) citations: List[int] = Field( ..., description="The integer IDs of the SPECIFIC sources which justify the answer.", ) llm_with_tool = llm.bind_tools( [cited_answer], tool_choice="cited_answer", ) example_q = """What is Brian's height? 
Source: 1 Information: Suzy is 6'2" Source: 2 Information: Jeremiah is blonde Source: 3 Information: Brian is 3 inches shorter than Suzy""" llm_with_tool.invoke(example_q) from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True) (llm_with_tool | output_parser).invoke(example_q) def format_docs_with_id(docs: List[Document]) -> str: formatted = [ f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}" for i, doc in enumerate(docs) ] return "\n\n" + "\n\n".join(formatted) format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id) answer_1 = prompt | llm_with_tool | output_parser chain_1 = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format_1) .assign(cited_answer=answer_1) .pick(["cited_answer", "docs"]) ) chain_1.invoke("How fast are cheetahs?") class Citation(BaseModel): source_id: int = Field( ..., description="The integer ID of a SPECIFIC source which justifies the answer.", ) quote: str = Field( ..., description="The VERBATIM quote from the specified source that justifies the answer.", ) class quoted_answer(BaseModel): """Answer the user question based only on the given sources, and cite the sources used.""" answer: str = Field( ..., description="The answer to the user question, which is based only on the given sources.", ) citations: List[Citation] = Field( ..., description="Citations from the given sources that justify the answer." ) output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True) llm_with_tool_2 = llm.bind_tools( [quoted_answer], tool_choice="quoted_answer", ) format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id) answer_2 = prompt | llm_with_tool_2 | output_parser_2 chain_2 = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format_2) .assign(quoted_answer=answer_2) .pick(["quoted_answer", "docs"]) ) chain_2.invoke("How fast are cheetahs?") from langchain_anthropic import ChatAnthropicMessages anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2") system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \ answer the user question and provide citations. If none of the articles answer the question, just say you don't know. Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \ justifies the answer and the ID of the quote article. Return a citation for every quote across all articles \ that justify the answer. Use the following format for your final output: <cited_answer> <answer></answer> <citations> <citation><source_id></source_id><quote></quote></citation> <citation><source_id></source_id><quote></quote></citation> ... 
</citations> </cited_answer> Here are the Wikipedia articles:{context}""" prompt_3 = ChatPromptTemplate.from_messages( [("system", system), ("human", "{question}")] ) from langchain_core.output_parsers import XMLOutputParser def format_docs_xml(docs: List[Document]) -> str: formatted = [] for i, doc in enumerate(docs): doc_str = f"""\ <source id=\"{i}\"> <title>{doc.metadata['title']}</title> <article_snippet>{doc.page_content}</article_snippet> </source>""" formatted.append(doc_str) return "\n\n<sources>" + "\n".join(formatted) + "</sources>" format_3 = itemgetter("docs") | RunnableLambda(format_docs_xml) answer_3 = prompt_3 | anthropic | XMLOutputParser() | itemgetter("cited_answer") chain_3 = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) .assign(context=format_3) .assign(cited_answer=answer_3) .pick(["cited_answer", "docs"]) ) chain_3.invoke("How fast are cheetahs?") from langchain.retrievers.document_compressors import EmbeddingsFilter from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter splitter = RecursiveCharacterTextSplitter( chunk_size=400, chunk_overlap=0, separators=["\n\n", "\n", ".", " "], keep_separator=False, ) compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), k=10) def split_and_filter(input) -> List[Document]: docs = input["docs"] question = input["question"] split_docs = splitter.split_documents(docs) stateful_docs = compressor.compress_documents(split_docs, question) return [stateful_doc for stateful_doc in stateful_docs] retrieve = ( RunnableParallel(question=RunnablePassthrough(), docs=wiki) | split_and_filter ) docs = retrieve.invoke("How fast are cheetahs?") for doc in docs: print(doc.page_content) print("\n\n") chain_4 = ( RunnableParallel(question=RunnablePassthrough(), docs=retrieve) .assign(context=format) .assign(answer=answer) .pick(["answer", "docs"]) ) chain_4.invoke("How fast are cheetahs?") class Citation(BaseModel): source_id: int = Field( ..., description="The integer ID of a SPECIFIC source which justifies the answer.", ) quote: str = Field( ..., description="The VERBATIM quote from the specified source that justifies the answer.", ) class annotated_answer(BaseModel): """Annotate the answer to the user question with quote citations that justify the answer.""" citations: List[Citation] = Field( ..., description="Citations from the given sources that justify the answer." ) llm_with_tools_5 = llm.bind_tools( [annotated_answer], tool_choice="annotated_answer", ) from langchain_core.prompts import MessagesPlaceholder prompt_5 = ChatPromptTemplate.from_messages( [ ( "system", "You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}", ), ("human", "{question}"), MessagesPlaceholder("chat_history", optional=True), ] ) answer_5 = prompt_5 | llm annotation_chain = ( prompt_5 | llm_with_tools_5 |
JsonOutputKeyToolsParser(key_name="annotated_answer", return_single=True)
langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser
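A plausible continuation of this citation row (a sketch, not part of the dataset row above): run the plain answer chain first, then feed the resulting AI message back in through chat_history so the bound annotated_answer tool can cite what was actually said. wiki and format are assumed to be the retriever and doc-formatting runnable defined earlier in the source notebook.

from langchain_core.runnables import RunnableParallel, RunnablePassthrough

chain_5 = (
    RunnableParallel(question=RunnablePassthrough(), docs=wiki)  # `wiki` assumed from earlier context
    .assign(context=format)  # `format` assumed: renders docs into the prompt
    .assign(ai_message=answer_5)
    .assign(
        chat_history=(lambda x: [x["ai_message"]]),  # replay the answer to the model
        answer=(lambda x: x["ai_message"].content),
    )
    .assign(annotations=annotation_chain)
    .pick(["answer", "docs", "annotations"])
)
chain_5.invoke("How fast are cheetahs?")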
from langchain.agents import AgentType, initialize_agent from langchain.tools import BearlyInterpreterTool from langchain_openai import ChatOpenAI bearly_tool =
BearlyInterpreterTool(api_key="...")
langchain.tools.BearlyInterpreterTool
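A minimal sketch of wiring the interpreter into an agent, which the imports above suggest; the model name is an assumption and the API key placeholder stays a placeholder.

tools = [bearly_tool.as_tool()]  # expose the sandboxed Python interpreter as an agent tool
llm = ChatOpenAI(model="gpt-4", temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
)
agent.run("What is 2 ** 12? Execute Python to check.")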
import os from langchain.retrievers import AzureCognitiveSearchRetriever os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<YOUR_ACS_SERVICE_NAME>" os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<YOUR_ACS_INDEX_NAME>" os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<YOUR_API_KEY>" retriever =
AzureCognitiveSearchRetriever(content_key="content", top_k=10)
langchain.retrievers.AzureCognitiveSearchRetriever
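A quick usage sketch (the query string is illustrative): the retriever returns Documents whose page_content comes from the configured content_key field.

docs = retriever.get_relevant_documents("what is langchain?")
for doc in docs:
    print(doc.page_content[:80])  # first 80 chars of each hit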
from langchain_community.document_loaders.chatgpt import ChatGPTLoader loader =
ChatGPTLoader(log_file="./example_data/fake_conversations.json", num_logs=1)
langchain_community.document_loaders.chatgpt.ChatGPTLoader
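Usage sketch: load() parses the exported conversation log and returns one Document per conversation; num_logs=1 keeps only the first.

docs = loader.load()
print(docs[0].page_content[:200])  # the first conversation, rendered as text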
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sodapy') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas') get_ipython().run_line_magic('pip', 'install --upgrade --quiet geopandas') import ast import geopandas as gpd import pandas as pd from langchain_community.document_loaders import OpenCityDataLoader dataset = "tmnf-yvry" # San Francisco crime data loader = OpenCityDataLoader(city_id="data.sfgov.org", dataset_id=dataset, limit=5000) docs = loader.load() df = pd.DataFrame([ast.literal_eval(d.page_content) for d in docs]) df["Latitude"] = df["location"].apply(lambda loc: loc["coordinates"][1]) df["Longitude"] = df["location"].apply(lambda loc: loc["coordinates"][0]) gdf = gpd.GeoDataFrame( df, geometry=gpd.points_from_xy(df.Longitude, df.Latitude), crs="EPSG:4326" ) gdf = gdf[ (gdf["Longitude"] >= -123.173825) & (gdf["Longitude"] <= -122.281780) & (gdf["Latitude"] >= 37.623983) & (gdf["Latitude"] <= 37.929824) ] import matplotlib.pyplot as plt sf = gpd.read_file("https://data.sfgov.org/resource/3psu-pn9h.geojson") fig, ax = plt.subplots(figsize=(10, 10)) sf.plot(ax=ax, color="white", edgecolor="black") gdf.plot(ax=ax, color="red", markersize=5) plt.show() from langchain_community.document_loaders import GeoDataFrameLoader loader =
GeoDataFrameLoader(data_frame=gdf, page_content_column="geometry")
langchain_community.document_loaders.GeoDataFrameLoader
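Usage sketch: each GeoDataFrame row becomes a Document whose page_content is the chosen geometry column, with the remaining columns stored as metadata.

docs = loader.load()
print(docs[0].page_content)      # e.g. "POINT (-122.4... 37.7...)"
print(sorted(docs[0].metadata))  # the other dataframe columns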
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=
rl_chain.ToSelectFrom(meals)
langchain_experimental.rl_chain.ToSelectFrom
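A sketch of how the truncated selection loop above presumably continues; it mirrors the first chain.run call, letting the policy learn from the score the chain computes on each iteration.

for _ in range(5):
    try:
        response = chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, "
            "our master chefs believe you will love it!",
        )
        print(response["response"])
    except Exception as e:
        print(e)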
get_ipython().run_line_magic('pip', 'install "pgvecto_rs[sdk]"') from typing import List from langchain.docstore.document import Document from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.fake import FakeEmbeddings from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
FakeEmbeddings(size=3)
langchain_community.embeddings.fake.FakeEmbeddings
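A sketch of the likely next step: write the split docs into pgvecto.rs. The connection URL is a placeholder, and FakeEmbeddings(size=3) makes this a 3-dimensional smoke test rather than a real index.

URL = "postgresql+psycopg://postgres:mysecretpassword@localhost:5432/postgres"  # placeholder
db = PGVecto_rs.from_documents(documents=docs, embedding=embeddings, db_url=URL)
print(db.similarity_search("What did the president say?", k=3))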
REGION = "us-central1" # @param {type:"string"} INSTANCE = "test-instance" # @param {type:"string"} DB_USER = "sqlserver" # @param {type:"string"} DB_PASS = "password" # @param {type:"string"} DATABASE = "test" # @param {type:"string"} TABLE_NAME = "test-default" # @param {type:"string"} get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mssql') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable sqladmin.googleapis.com') from langchain_google_cloud_sql_mssql import MSSQLEngine engine = MSSQLEngine.from_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE, user=DB_USER, password=DB_PASS, ) engine.init_document_table(TABLE_NAME, overwrite_existing=True) from langchain_core.documents import Document from langchain_google_cloud_sql_mssql import MSSQLDocumentSaver test_docs = [ Document( page_content="Apple Granny Smith 150 0.99 1", metadata={"fruit_id": 1}, ), Document( page_content="Banana Cavendish 200 0.59 0", metadata={"fruit_id": 2}, ), Document( page_content="Orange Navel 80 1.29 1", metadata={"fruit_id": 3}, ), ] saver = MSSQLDocumentSaver(engine=engine, table_name=TABLE_NAME) saver.add_documents(test_docs) from langchain_google_cloud_sql_mssql import MSSQLLoader loader = MSSQLLoader(engine=engine, table_name=TABLE_NAME) docs = loader.lazy_load() for doc in docs: print("Loaded documents:", doc) from langchain_google_cloud_sql_mssql import MSSQLLoader loader =
MSSQLLoader( engine=engine, query=f"select * from \"{TABLE_NAME}\" where JSON_VALUE(langchain_metadata, '$.fruit_id') = 1", )
langchain_google_cloud_sql_mssql.MSSQLLoader
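Usage sketch: the query-based loader returns only rows whose langchain_metadata JSON matches the predicate, and the saver from the same package can delete them again.

docs = loader.load()
print("Filtered rows:", docs)
saver.delete(docs)  # removes the matching rows from TABLE_NAME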
from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import IuguLoader iugu_loader =
IuguLoader("charges")
langchain_community.document_loaders.IuguLoader
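A sketch of the standard pattern these ecosystem loaders follow (the creator's default embeddings require OPENAI_API_KEY to be set):

index = VectorstoreIndexCreator().from_loaders([iugu_loader])
iugu_doc_retriever = index.vectorstore.as_retriever()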
import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pypdf pymongo langchain-openai tiktoken') import getpass MONGODB_ATLAS_CLUSTER_URI = getpass.getpass("MongoDB Atlas Cluster URI:") from pymongo import MongoClient client = MongoClient(MONGODB_ATLAS_CLUSTER_URI) DB_NAME = "langchain_db" COLLECTION_NAME = "test" ATLAS_VECTOR_SEARCH_INDEX_NAME = "index_name" MONGODB_COLLECTION = client[DB_NAME][COLLECTION_NAME] from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("https://arxiv.org/pdf/2303.08774.pdf") data = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter =
RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
langchain_text_splitters.RecursiveCharacterTextSplitter
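The likely next steps, sketched: split the PDF and index the chunks in Atlas. index_name must match a vector search index already defined on the collection.

docs = text_splitter.split_documents(data)

from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_openai import OpenAIEmbeddings

vector_search = MongoDBAtlasVectorSearch.from_documents(
    documents=docs,
    embedding=OpenAIEmbeddings(),
    collection=MONGODB_COLLECTION,
    index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,
)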
from langchain.chains import ConversationChain from langchain.memory import ConversationBufferMemory from langchain_openai import OpenAI llm = OpenAI(temperature=0) conversation = ConversationChain( llm=llm, verbose=True, memory=ConversationBufferMemory() ) conversation.predict(input="Hi there!") conversation.predict(input="What's the weather?") from langchain.prompts.prompt import PromptTemplate template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Current conversation: {history} Human: {input} AI Assistant:""" PROMPT = PromptTemplate(input_variables=["history", "input"], template=template) conversation = ConversationChain( prompt=PROMPT, llm=llm, verbose=True, memory=ConversationBufferMemory(ai_prefix="AI Assistant"), ) conversation.predict(input="Hi there!") conversation.predict(input="What's the weather?") from langchain.prompts.prompt import PromptTemplate template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. Current conversation: {history} Friend: {input} AI:""" PROMPT =
PromptTemplate(input_variables=["history", "input"], template=template)
langchain.prompts.prompt.PromptTemplate
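A sketch of the memory configuration this template implies: the "Friend:" prefix in the prompt only lines up if the memory labels human turns the same way.

conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory(human_prefix="Friend"),  # match the template's prefix
)
conversation.predict(input="Hi there!")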
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pdfminer') from langchain_community.document_loaders.image import UnstructuredImageLoader loader = UnstructuredImageLoader("layout-parser-paper-fast.jpg") data = loader.load() data[0] loader =
UnstructuredImageLoader("layout-parser-paper-fast.jpg", mode="elements")
langchain_community.document_loaders.image.UnstructuredImageLoader
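Usage sketch: in "elements" mode load() returns one Document per detected layout element (title, narrative text, ...) instead of a single blob for the whole image.

data = loader.load()
print(data[0])  # e.g. the paper title as its own Document, with element metadata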