from datetime import datetime, timedelta
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
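# Sketch (assumption): the imports above suggest this feeds a time-weighted retriever;
# the decay_rate, k, and sample document below are illustrative, not from the source.
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.999, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
    [Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.get_relevant_documents("hello world")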
from langchain_community.llms import Ollama
llm = Ollama(model="llama2")
llm("The first man on the moon was ...")
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Ollama(
model="llama2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("The first man on the moon was ...")
from langchain_community.llms import Ollama
llm = Ollama(model="llama2:13b")
llm("The first man on the moon was ... think step by step")
get_ipython().run_line_magic('env', 'CMAKE_ARGS="-DLLAMA_METAL=on"')
get_ipython().run_line_magic('env', 'FORCE_CMAKE=1')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
verbose=True,
)
llm("The first man on the moon was ... Let's think step by step")
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
streaming_llm = OpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
)
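# Continuation sketch (assumption): wire the streaming LLM into the same
# question-generator / combine-docs pattern used for the non-streaming chains above.
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
bot = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})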
get_ipython().system("python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken")
import getpass
import os
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("Activeloop Token:")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
os.environ["ACTIVELOOP_ORG"] = getpass.getpass("Activeloop Org:")
org_id = os.environ["ACTIVELOOP_ORG"]
embeddings = OpenAIEmbeddings()
dataset_path = "hub://" + org_id + "/data"
with open("messages.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
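# Continuation sketch (assumption): split the file and push the chunks into the
# Deep Lake dataset path built above, then expose it through a RetrievalQA chain.
texts = text_splitter.split_text(state_of_the_union)
db = DeepLake.from_texts(texts, embeddings, dataset_path=dataset_path)
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(), chain_type="stuff", retriever=db.as_retriever()
)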
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
print(few_shot_prompt.format())
final_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a wondrous wizard of math."),
few_shot_prompt,
("human", "{input}"),
]
)
from langchain_community.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic(temperature=0.0)
chain.invoke({"input": "What's the square of a triangle?"})
from langchain.prompts import SemanticSimilarityExampleSelector
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
{"input": "What did the cow say to the moon?", "output": "nothing at all"},
{
"input": "Write me a poem about the moon",
"output": "One for the moon, and one for me, who are we to talk about the moon?",
},
]
to_vectorize = [" ".join(example.values()) for example in examples]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(to_vectorize, embeddings, metadatas=examples)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore,
k=2,
)
example_selector.select_examples({"input": "horse"})
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
input_variables=["input"],
example_selector=example_selector,
example_prompt=ChatPromptTemplate.from_messages(
[("human", "{input}"), ("ai", "{output}")]
),
)
print(few_shot_prompt.format(input="What's 3+3?"))
final_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a wondrous wizard of math."),
few_shot_prompt,
("human", "{input}"),
]
)
print(few_shot_prompt.format(input="What's 3+3?"))
from langchain_community.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic(temperature=0.0)
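# Sketch (assumption): exercise the dynamic few-shot chain the same way the static
# few-shot chain was invoked above.
chain.invoke({"input": "What's 3+3?"})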
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
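# Continuation sketch (assumption): build a ReAct agent over the Wikipedia tool, which
# is what the imports above point to; the "hwchase17/react" hub prompt and the sample
# question are illustrative, not stated in this snippet.
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = ChatOpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "How many people live in Canada?"})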
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile
import requests
logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("./cj/cj.pdf")
docs = loader.load()
tables = []
texts = [d.page_content for d in docs]
len(texts)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = PromptTemplate.from_template(prompt_text)
empty_response = RunnableLambda(
lambda x: AIMessage(content="Error processing document")
)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
retriever = db.as_retriever()
docs = retriever.invoke(query)
print(docs[0].page_content)
docs_and_scores = db.similarity_search_with_score(query)
docs_and_scores[0]
embedding_vector = embeddings.embed_query(query)
docs_and_scores = db.similarity_search_by_vector(embedding_vector)
db.save_local("faiss_index")
new_db = FAISS.load_local("faiss_index", embeddings)
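# Sketch (assumption): query the reloaded index the same way as the original one.
docs = new_db.similarity_search(query)
docs[0]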
import random
from docarray import BaseDoc
from docarray.typing import NdArray
from langchain.retrievers import DocArrayRetriever
from langchain_community.embeddings import FakeEmbeddings
embeddings = FakeEmbeddings(size=32)
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
year: int
color: str
from docarray.index import InMemoryExactNNIndex
db = InMemoryExactNNIndex[MyDoc]()
db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=embeddings.embed_query(f"query {i}"),
year=i,
color=random.choice(["red", "green", "blue"]),
)
for i in range(100)
]
)
filter_query = {"year": {"$lte": 90}}
retriever = DocArrayRetriever(
index=db,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
doc = retriever.get_relevant_documents("some query")
print(doc)
from docarray.index import HnswDocumentIndex
db = HnswDocumentIndex[MyDoc](work_dir="hnsw_index")
db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=embeddings.embed_query(f"query {i}"),
year=i,
color=random.choice(["red", "green", "blue"]),
)
for i in range(100)
]
)
filter_query = {"year": {"$lte": 90}}
retriever = DocArrayRetriever(
index=db,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
doc = retriever.get_relevant_documents("some query")
print(doc)
from pydantic import Field
class WeaviateDoc(BaseDoc):
title: str
title_embedding: NdArray[32] = Field(is_embedding=True)
year: int
color: str
from docarray.index import WeaviateDocumentIndex
dbconfig = WeaviateDocumentIndex.DBConfig(host="http://localhost:8080")
db = WeaviateDocumentIndex[WeaviateDoc](db_config=dbconfig)
db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=embeddings.embed_query(f"query {i}"),
year=i,
color=random.choice(["red", "green", "blue"]),
)
for i in range(100)
]
)
filter_query = {"path": ["year"], "operator": "LessThanEqual", "valueInt": "90"}
retriever = DocArrayRetriever(
index=db,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
doc = retriever.get_relevant_documents("some query")
print(doc)
from docarray.index import ElasticDocIndex
db = ElasticDocIndex[MyDoc](
hosts="http://localhost:9200", index_name="docarray_retriever"
)
db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=embeddings.embed_query(f"query {i}"),
year=i,
color=random.choice(["red", "green", "blue"]),
)
for i in range(100)
]
)
filter_query = {"range": {"year": {"lte": 90}}}
retriever = DocArrayRetriever(
index=db,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
doc = retriever.get_relevant_documents("some query")
print(doc)
from docarray.index import QdrantDocumentIndex
from qdrant_client.http import models as rest
qdrant_config = QdrantDocumentIndex.DBConfig(path=":memory:")
db = QdrantDocumentIndex[MyDoc](qdrant_config)
db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=embeddings.embed_query(f"query {i}"),
year=i,
color=random.choice(["red", "green", "blue"]),
)
for i in range(100)
]
)
filter_query = rest.Filter(
must=[
rest.FieldCondition(
key="year",
range=rest.Range(
gte=10,
lt=90,
),
)
]
)
retriever = DocArrayRetriever(
index=db,
embeddings=embeddings,
search_field="title_embedding",
content_field="title",
filters=filter_query,
)
doc = retriever.get_relevant_documents("some query")
print(doc)
movies = [
{
"title": "Inception",
"description": "A thief who steals corporate secrets through the use of dream-sharing technology is given the task of planting an idea into the mind of a CEO.",
"director": "Christopher Nolan",
"rating": 8.8,
},
{
"title": "The Dark Knight",
"description": "When the menace known as the Joker wreaks havoc and chaos on the people of Gotham, Batman must accept one of the greatest psychological and physical tests of his ability to fight injustice.",
"director": "Christopher Nolan",
"rating": 9.0,
},
{
"title": "Interstellar",
"description": "Interstellar explores the boundaries of human exploration as a group of astronauts venture through a wormhole in space. In their quest to ensure the survival of humanity, they confront the vastness of space-time and grapple with love and sacrifice.",
"director": "Christopher Nolan",
"rating": 8.6,
},
{
"title": "Pulp Fiction",
"description": "The lives of two mob hitmen, a boxer, a gangster's wife, and a pair of diner bandits intertwine in four tales of violence and redemption.",
"director": "Quentin Tarantino",
"rating": 8.9,
},
{
"title": "Reservoir Dogs",
"description": "When a simple jewelry heist goes horribly wrong, the surviving criminals begin to suspect that one of them is a police informant.",
"director": "Quentin Tarantino",
"rating": 8.3,
},
{
"title": "The Godfather",
"description": "An aging patriarch of an organized crime dynasty transfers control of his empire to his reluctant son.",
"director": "Francis Ford Coppola",
"rating": 9.2,
},
]
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from langchain_openai import OpenAIEmbeddings
class MyDoc(BaseDoc):
title: str
description: str
description_embedding: NdArray[1536]
rating: float
director: str
embeddings = OpenAIEmbeddings()
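# Sketch (assumption): embed each movie into a MyDoc, index it in memory, and expose
# it through DocArrayRetriever, mirroring the earlier DocArray examples on this page;
# the field names follow the MyDoc class defined just above.
from docarray.index import InMemoryExactNNIndex
from langchain.retrievers import DocArrayRetriever

docs = DocList[MyDoc](
    MyDoc(
        title=movie["title"],
        description=movie["description"],
        description_embedding=embeddings.embed_query(movie["description"]),
        rating=movie["rating"],
        director=movie["director"],
    )
    for movie in movies
)
db = InMemoryExactNNIndex[MyDoc]()
db.index(docs)
retriever = DocArrayRetriever(
    index=db,
    embeddings=embeddings,
    search_field="description_embedding",
    content_field="description",
)
retriever.get_relevant_documents("movies about dreams")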
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
llms = [
OpenAI(temperature=0),
Cohere(temperature=0),
HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}),
]
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")
prompt = PromptTemplate(
template="What is the capital of {state}?", input_variables=["state"]
)
model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)
model_lab_with_prompt.compare("New York")
from langchain.chains import SelfAskWithSearchChain
from langchain_community.utilities import SerpAPIWrapper
open_ai_llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
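# Sketch (assumption): compare self-ask-with-search chains the same way the plain LLMs
# were compared above; the chain is built from the OpenAI LLM and the search wrapper.
self_ask_with_search_openai = SelfAskWithSearchChain(
    llm=open_ai_llm, search_chain=search, verbose=True
)
chains = [self_ask_with_search_openai]
model_lab = ModelLaboratory(chains)
model_lab.compare("What is the hometown of the reigning men's U.S. Open champion?")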
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import SpacyTextSplitter
text_splitter = SpacyTextSplitter(chunk_size=1000)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import SentenceTransformersTokenTextSplitter
splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)
text = "Lorem "
count_start_and_stop_tokens = 2
text_token_count = splitter.count_tokens(text=text) - count_start_and_stop_tokens
print(text_token_count)
token_multiplier = splitter.maximum_tokens_per_chunk // text_token_count + 1
text_to_split = text * token_multiplier
print(f"tokens in text to split: {splitter.count_tokens(text=text_to_split)}")
text_chunks = splitter.split_text(text=text_to_split)
print(text_chunks[1])
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import NLTKTextSplitter
text_splitter = NLTKTextSplitter(chunk_size=1000)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
with open("./your_korean_doc.txt") as f:
korean_document = f.read()
from langchain_text_splitters import KonlpyTextSplitter
text_splitter = KonlpyTextSplitter()
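# Sketch (assumption): same split-and-print pattern as the other splitters above.
texts = text_splitter.split_text(korean_document)
print(texts[0])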
get_ipython().system(' pip install langchain replicate')
from langchain_community.chat_models import ChatOllama
llama2_chat = ChatOllama(model="llama2:13b-chat")
llama2_code = ChatOllama(model="codellama:7b-instruct")
from langchain_community.llms import Replicate
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llama2_chat_replicate = Replicate(
model=replicate_id, input={"temperature": 0.01, "max_length": 500, "top_p": 1}
)
llm = llama2_chat
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_uri("sqlite:///nba_roster.db", sample_rows_in_table_info=0)
def get_schema(_):
return db.get_table_info()
def run_query(query):
return db.run(query)
from langchain_core.prompts import ChatPromptTemplate
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
[
("system", "Given an input question, convert it to a SQL query. No pre-amble."),
("human", template),
]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
sql_response = (
RunnablePassthrough.assign(schema=get_schema)
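# Continuation sketch (assumption): the rest of the usual text-to-SQL pipeline; the
# stop token and the example question are illustrative, not from this snippet.
| prompt
| llm.bind(stop=["\nSQLResult:"])
| StrOutputParser()
)
sql_response.invoke({"question": "How many players are on the roster?"})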
get_ipython().run_line_magic('pip', 'install --upgrade --quiet multion langchain -q')
from langchain_community.agent_toolkits import MultionToolkit
toolkit = MultionToolkit()
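# Sketch (assumption): list the browser tools the toolkit exposes.
tools = toolkit.get_tools()
tools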
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"})
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain_core.agents import AgentFinish
def execute_agent(agent, tools, input):
tool_map = {tool.name: tool for tool in tools}
response = agent.invoke(input)
while not isinstance(response, AgentFinish):
tool_outputs = []
for action in response:
tool_output = tool_map[action.tool].invoke(action.tool_input)
print(action.tool, action.tool_input, tool_output, end="\n\n")
tool_outputs.append(
{"output": tool_output, "tool_call_id": action.tool_call_id}
)
response = agent.invoke(
{
"tool_outputs": tool_outputs,
"run_id": action.run_id,
"thread_id": action.thread_id,
}
)
return response
response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
print(response.return_values["output"])
next_response = execute_agent(
agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}
)
print(next_response.return_values["output"])
chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
response_format={"type": "json_object"}
)
output = chat.invoke(
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
)
print(output.content)
import json
json.loads(output.content)
chat = ChatOpenAI(model="gpt-3.5-turbo-1106")
output = chat.generate(
[
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
]
)
print(output.llm_output)
from typing import Literal
from langchain.output_parsers.openai_tools import PydanticToolsParser
from langchain.utils.openai_functions import convert_pydantic_to_openai_tool
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class GetCurrentWeather(BaseModel):
"""Get the current weather in a location."""
location: str = Field(description="The city and state, e.g. San Francisco, CA")
unit: Literal["celsius", "fahrenheit"] = Field(
default="fahrenheit", description="The temperature unit, default to fahrenheit"
)
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful assistant"), ("user", "{input}")]
)
model = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
tools=[convert_pydantic_to_openai_tool(GetCurrentWeather)]
)
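# Sketch (assumption): parse the model's tool calls back into the Pydantic class
# defined above; the example question is illustrative.
chain = prompt | model | PydanticToolsParser(tools=[GetCurrentWeather])
chain.invoke({"input": "what's the weather in NYC today"})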
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate
full_template = """{introduction}
{example}
{start}"""
full_prompt = PromptTemplate.from_template(full_template)
introduction_template = """You are impersonating {person}."""
introduction_prompt = PromptTemplate.from_template(introduction_template)
example_template = """Here's an example of an interaction:
Q: {example_q}
A: {example_a}"""
example_prompt = PromptTemplate.from_template(example_template)
start_template = """Now, do this for real!
Q: {input}
A:"""
start_prompt = PromptTemplate.from_template(start_template)
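# Continuation sketch (assumption): compose the sub-prompts into one pipeline prompt;
# the values passed to format() are illustrative.
input_prompts = [
    ("introduction", introduction_prompt),
    ("example", example_prompt),
    ("start", start_prompt),
]
pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_prompt, pipeline_prompts=input_prompts
)
print(
    pipeline_prompt.format(
        person="Elon Musk",
        example_q="What's your favorite car?",
        example_a="Tesla",
        input="What's your favorite social media site?",
    )
)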
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
search = DuckDuckGoSearchRun()
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
model_with_structure = model.with_structured_output(Joke)
model_with_structure.invoke("Tell me a joke about cats")
model_with_structure = model.with_structured_output(Joke, method="json_mode")
model_with_structure.invoke(
"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys"
)
from langchain_fireworks import ChatFireworks
model = ChatFireworks(model="accounts/fireworks/models/firefunction-v1")
model_with_structure = model.with_structured_output(Joke)
model_with_structure.invoke("Tell me a joke about cats")
model_with_structure = model.with_structured_output(Joke, method="json_mode")
model_with_structure.invoke(
"Tell me a joke about dogs, respond in JSON with `setup` and `punchline` keys"
)
from langchain_mistralai import ChatMistralAI
model = ChatMistralAI(model="mistral-large-latest")
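# Sketch (assumption): Mistral follows the same structured-output calls shown above
# for the OpenAI and Fireworks models.
model_with_structure = model.with_structured_output(Joke)
model_with_structure.invoke("Tell me a joke about cats")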
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
prompt + HumanMessage(content="hi") + | AIMessage(content="what?") | langchain_core.messages.AIMessage |
from langchain.chains import create_citation_fuzzy_match_chain
from langchain_openai import ChatOpenAI
question = "What did the author do during college?"
context = """
My name is Jason Liu, and I grew up in Toronto Canada but I was born in China.
I went to an arts highschool but in university I studied Computational Mathematics and physics.
As part of coop I worked at many companies including Stitchfix, Facebook.
I also started the Data Science club at the University of Waterloo and I was the president of the club for 2 years.
"""
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
chain = create_citation_fuzzy_match_chain(llm)
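# Sketch (assumption): run the fuzzy citation chain over the question and context above.
result = chain.run(question=question, context=context)
print(result)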
get_ipython().system('pip install gymnasium')
import tenacity
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
class GymnasiumAgent:
@classmethod
def get_docs(cls, env):
return env.unwrapped.__doc__
def __init__(self, model, env):
self.model = model
self.env = env
self.docs = self.get_docs(env)
self.instructions = """
Your goal is to maximize your return, i.e. the sum of the rewards you receive.
I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as:
Observation: <observation>
Reward: <reward>
Termination: <termination>
Truncation: <truncation>
Return: <sum_of_rewards>
You will respond with an action, formatted as:
Action: <action>
where you replace <action> with your actual action.
Do nothing else but return the action.
"""
self.action_parser = RegexParser(
regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-speech')
from langchain_community.document_loaders import GoogleSpeechToTextLoader
project_id = "<PROJECT_ID>"
file_path = "gs://cloud-samples-data/speech/audio.flac"
loader = GoogleSpeechToTextLoader(project_id=project_id, file_path=file_path)
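# Sketch (assumption): run the transcription and look at the resulting document.
docs = loader.load()
print(docs[0].page_content)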
from langchain_community.llms import Ollama
llm = Ollama(model="llama2")
llm("The first man on the moon was ...")
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Ollama(
model="llama2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("The first man on the moon was ...")
from langchain_community.llms import Ollama
llm = Ollama(model="llama2:13b")
llm("The first man on the moon was ... think step by step")
get_ipython().run_line_magic('env', 'CMAKE_ARGS="-DLLAMA_METAL=on"')
get_ipython().run_line_magic('env', 'FORCE_CMAKE=1')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
verbose=True,
)
llm("The first man on the moon was ... Let's think step by step")
get_ipython().run_line_magic('pip', 'install gpt4all')
from langchain_community.llms import GPT4All
llm = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin"
)
llm("The first man on the moon was ... Let's think step by step")
from langchain_community.llms.llamafile import Llamafile
llm = Llamafile()
llm.invoke("The first man on the moon was ... Let's think step by step.")
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
verbose=True,
)
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
    try:
        response = chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
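            # Continuation sketch (assumption): the remaining arguments mirror the
            # single chain.run call above; the except clause just surfaces errors.
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
        )
        print(response["response"])
    except Exception as e:
        print(e)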
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="How many people live in canada?")
agent_chain.run(input="what is their national anthem called?")
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix, input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
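# Sketch (assumption): run the memory-free agent the same way as the memory-backed
# one above, to compare the answers.
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
agent_executor.run(input="How many people live in canada?")
agent_executor.run(input="what is their national anthem called?")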
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
runnable = prompt | model
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
)
with_message_history.invoke(
{"ability": "math", "input": "What does cosine mean?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "def234"}},
)
from langchain_core.runnables import ConfigurableFieldSpec
store = {}
def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "input": "Hello"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
chain = RunnableParallel({"output_message": ChatOpenAI()})
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history,
output_messages_key="output_message",
)
with_message_history.invoke(
[HumanMessage(content="What did Simone de Beauvoir believe about free will")],
config={"configurable": {"session_id": "baz"}},
)
with_message_history.invoke(
[HumanMessage(content="How did this compare to Sartre")],
config={"configurable": {"session_id": "baz"}},
)
RunnableWithMessageHistory(
ChatOpenAI(),
get_session_history,
)
from operator import itemgetter
RunnableWithMessageHistory(
itemgetter("input_messages") | ChatOpenAI(),
get_session_history,
input_messages_key="input_messages",
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis')
REDIS_URL = "redis://localhost:6379/0"
from langchain_community.chat_message_histories import RedisChatMessageHistory
def get_message_history(session_id: str) -> RedisChatMessageHistory:
return RedisChatMessageHistory(session_id, url=REDIS_URL)
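# Sketch (assumption): plug the Redis-backed history into the runnable built earlier,
# matching the in-memory RunnableWithMessageHistory configuration above; the session
# id is illustrative.
with_message_history = RunnableWithMessageHistory(
    runnable,
    get_message_history,
    input_messages_key="input",
    history_messages_key="history",
)
with_message_history.invoke(
    {"ability": "math", "input": "What does cosine mean?"},
    config={"configurable": {"session_id": "redis123"}},
)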
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
web search engine to answer the given question, end \
the queries with ’**’. Question: \
{x} Answer:"""
rewrite_prompt = ChatPromptTemplate.from_template(template)
from langchain import hub
rewrite_prompt = hub.pull("langchain-ai/rewrite")
print(rewrite_prompt.template)
def _parse(text):
return text.strip("**")
rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewriter.invoke({"x": distracted_query})
rewrite_retrieve_read_chain = (
{
"context": {"x": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
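# Continuation sketch (assumption): split, embed with Cohere, and build a FAISS
# retriever, following the usual contextual-compression setup these imports suggest.
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever(
    search_kwargs={"k": 20}
)
query = "What did the president say about Ketanji Brown Jackson"
docs = retriever.get_relevant_documents(query)
pretty_print_docs(docs)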
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-text-to-speech')
from langchain.tools import GoogleCloudTextToSpeechTool
text_to_speak = "Hello world!"
tts = GoogleCloudTextToSpeechTool()
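# Sketch (assumption): synthesize the text; the tool returns the path of the audio file it wrote.
speech_file = tts.run(text_to_speak)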
import uuid
from pathlib import Path
import langchain
import torch
from bs4 import BeautifulSoup as Soup
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore, LocalFileStore
from langchain_community.document_loaders.recursive_url_loader import (
RecursiveUrlLoader,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa
DOCSTORE_DIR = "."
DOCSTORE_ID_KEY = "doc_id"
loader = RecursiveUrlLoader(
"https://ar5iv.labs.arxiv.org/html/1706.03762",
max_depth=2,
extractor=lambda x: Soup(x, "html.parser").text,
)
data = loader.load()
print(f"Loaded {len(data)} documents")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} documents")
from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
from langchain_core.embeddings import Embeddings
model_name = "Intel/bge-small-en-v1.5-rag-int8-static"
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
model_inc = QuantizedBiEncoderEmbeddings(
model_name=model_name,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages: ",
)
def get_multi_vector_retriever(
docstore_id_key: str, collection_name: str, embedding_function: Embeddings
):
"""Create the composed retriever object."""
vectorstore = Chroma(
collection_name=collection_name,
embedding_function=embedding_function,
)
store = InMemoryByteStore()
return MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=docstore_id_key,
)
retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY, "multi_vec_store", model_inc)
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
id_key = "doc_id"
doc_ids = [str(uuid.uuid4()) for _ in all_splits]
sub_docs = []
for i, doc in enumerate(all_splits):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, all_splits)))
import torch
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "Intel/neural-chat-7b-v3-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
model_id, device_map="auto", torch_dtype=torch.bfloat16
)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=100)
hf = HuggingFacePipeline(pipeline=pipe)
from typing import List
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.system_message = system_message
self.model = model
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
return self.stored_messages
def step(
self,
input_message: HumanMessage,
) -> AIMessage:
messages = self.update_messages(input_message)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
import os
os.environ["OPENAI_API_KEY"] = ""
assistant_role_name = "Python Programmer"
user_role_name = "Stock Trader"
task = "Develop a trading bot for the stock market"
word_limit = 50 # word limit for task brainstorming
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
task_specifier_template = HumanMessagePromptTemplate.from_template(
template=task_specifier_prompt
)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
word_limit=word_limit,
)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(
template=assistant_inception_prompt
)
assistant_sys_msg = assistant_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(
template=user_inception_prompt
)
user_sys_msg = user_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(
assistant_role_name, user_role_name, specified_task
)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))
assistant_agent.reset()
user_agent.reset()
user_msg = HumanMessage(
content=(
f"{user_sys_msg.content}. "
"Now start to give me introductions one by one. "
"Only reply with Instruction and Input."
)
)
assistant_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
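# Continuation sketch (assumption): the standard CAMEL role-playing loop between the
# two agents; the turn limit is illustrative.
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
    n += 1
    user_ai_msg = user_agent.step(assistant_msg)
    user_msg = HumanMessage(content=user_ai_msg.content)
    print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
    assistant_ai_msg = assistant_agent.step(user_msg)
    assistant_msg = HumanMessage(content=assistant_ai_msg.content)
    print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
    if "<CAMEL_TASK_DONE>" in user_msg.content:
        break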
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
import marqo
marqo_url = "http://localhost:8882" # if using marqo cloud replace with your endpoint (console.marqo.ai)
marqo_api_key = "" # if using marqo cloud replace with your api key (console.marqo.ai)
client = marqo.Client(url=marqo_url, api_key=marqo_api_key)
index_name = "langchain-demo"
docsearch = Marqo.from_documents(docs, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
result_docs = docsearch.similarity_search(query)
print(result_docs[0].page_content)
result_docs = docsearch.similarity_search_with_score(query)
print(result_docs[0][0].page_content, result_docs[0][1], sep="\n")
index_name = "langchain-multimodal-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
settings = {"treat_urls_and_pointers_as_images": True, "model": "ViT-L/14"}
client.create_index(index_name, **settings)
client.index(index_name).add_documents(
[
{
"caption": "Bus",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg",
},
{
"caption": "Plane",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
return f"{res['caption']}: {res['image']}"
docsearch = Marqo(client, index_name, page_content_builder=get_content)
query = "vehicles that fly"
doc_results = docsearch.similarity_search(query)
for doc in doc_results:
print(doc.page_content)
index_name = "langchain-byo-index-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
client.create_index(index_name)
client.index(index_name).add_documents(
[
{
"Title": "Smartphone",
"Description": "A smartphone is a portable computer device that combines mobile telephone "
"functions and computing functions into one unit.",
},
{
"Title": "Telephone",
"Description": "A telephone is a telecommunications device that permits two or more users to"
"conduct a conversation when they are too far apart to be easily heard directly.",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
if "text" in res:
return res["text"]
return res["Description"]
docsearch = Marqo(client, index_name, page_content_builder=get_content)
docsearch.add_texts(["This is a document that is about elephants"])
query = "modern communications devices"
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = "elephants"
doc_results = docsearch.similarity_search(query, page_content_builder=get_content)
print(doc_results[0].page_content)
query = {"communications devices": 1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = {"communications devices": 1.0, "technology post 2000": -1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
import getpass
import os
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
index_name = "langchain-qa-with-retrieval"
docsearch = Marqo.from_documents(docs, index_name=index_name)
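# Sketch (assumption): answer with sources over the Marqo index, which is what the
# RetrievalQAWithSourcesChain import above points to; the question is illustrative.
chain = RetrievalQAWithSourcesChain.from_chain_type(
    OpenAI(temperature=0), chain_type="stuff", retriever=docsearch.as_retriever()
)
chain(
    {"question": "What did the president say about Justice Breyer"},
    return_only_outputs=True,
)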
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI, OpenAI
llm = ChatOpenAI(temperature=0.0)
math_llm = OpenAI(temperature=0.0)
tools = load_tools(
["human", "llm-math"],
llm=math_llm,
)
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_chain.run("What's my friend Eric's surname?")
def get_input() -> str:
print("Insert your text. Enter 'q' or press Ctrl-D (or Ctrl-Z on Windows) to end.")
contents = []
while True:
try:
line = input()
except EOFError:
break
if line == "q":
break
contents.append(line)
return "\n".join(contents)
tools = load_tools(["human", "ddg-search"], llm=math_llm, input_func=get_input)
from langchain.tools import HumanInputRun
tool = HumanInputRun(input_func=get_input)
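# Sketch (assumption): rebuild the agent with the multi-line human-input tool,
# mirroring the initialize_agent call earlier in this example.
agent_chain = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent_chain.run("I need help attributing a quote")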
get_ipython().run_line_magic('pip', 'install --upgrade --quiet ain-py')
import os
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = ""
import os
from ain.account import Account
if os.environ.get("AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY", None):
account = Account(os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"])
else:
account = Account.create()
os.environ["AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY"] = account.private_key
print(
f"""
address: {account.address}
private_key: {account.private_key}
"""
)
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
toolkit = AINetworkToolkit()
from langchain_community.document_loaders import VsdxLoader
loader = VsdxLoader(file_path="./example_data/fake.vsdx")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyairtable')
from langchain_community.document_loaders import AirtableLoader
api_key = "xxx"
base_id = "xxx"
table_id = "xxx"
loader = AirtableLoader(api_key, table_id, base_id)
from langchain_community.document_loaders import OBSDirectoryLoader
endpoint = "your-endpoint"
config = {"ak": "your-access-key", "sk": "your-secret-key"}
loader = OBSDirectoryLoader("your-bucket-name", endpoint=endpoint, config=config)
loader.load()
loader = OBSDirectoryLoader(
"your-bucket-name", endpoint=endpoint, config=config, prefix="test_prefix"
)
loader.load()
config = {"get_token_from_ecs": True}
loader = OBSDirectoryLoader("your-bucket-name", endpoint=endpoint, config=config)
loader.load()
loader = | OBSDirectoryLoader("your-bucket-name", endpoint=endpoint) | langchain_community.document_loaders.OBSDirectoryLoader |
from langchain.agents import Tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
from langchain_community.utilities import SerpAPIWrapper
search = SerpAPIWrapper()
tools = [
Tool(
name="search",
func=search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions",
),
WriteFileTool(),
    ReadFileTool(),
]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scann')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = SelfQueryRetriever.from_llm(
llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
qa_chain(
"What is rentable area for the property owned by DHA Group?"
) # correct answer should be 13,500 sq ft
from typing import Dict, List
from langchain_community.document_loaders import DocugamiLoader
from langchain_core.documents import Document
loader = DocugamiLoader(docset_id="zo954yqy53wp")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet metal_sdk')
from metal_sdk.metal import Metal
API_KEY = ""
CLIENT_ID = ""
INDEX_ID = ""
metal = Metal(API_KEY, CLIENT_ID, INDEX_ID)
metal.index({"text": "foo1"})
metal.index({"text": "foo"})
from langchain.retrievers import MetalRetriever
retriever = MetalRetriever(metal, params={"limit": 2})
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elevenlabs')
import os
os.environ["ELEVEN_API_KEY"] = ""
from langchain.tools import ElevenLabsText2SpeechTool
text_to_speak = "Hello world! I am the real slim shady"
tts = ElevenLabsText2SpeechTool()
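# Hedged usage sketch (not part of the original snippet): synthesize the text to an
# audio file and play it back; `speech_file` is an assumed variable name.
speech_file = tts.run(text_to_speak)
tts.play(speech_file)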
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = load_tools(
["arxiv"],
)
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
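# Hedged continuation (an assumption, not shown in the original row): wrap the agent in
# the AgentExecutor imported above and ask a question that should route to the arxiv tool.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "Summarize a recent arxiv paper about large language models."})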
import runhouse as rh
from langchain_community.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
embeddings = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_scholar import GoogleScholarQueryRun
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
os.environ["SERP_API_KEY"] = ""
tool = GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper())
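# Hedged usage sketch (the query string is an assumption): BaseTool.run executes the search.
print(tool.run("chain of thought prompting"))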
get_ipython().run_line_magic('pip', 'install --upgrade --quiet manifest-ml')
from langchain_community.llms.manifest import ManifestWrapper
from manifest import Manifest
manifest = Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
)
print(manifest.client_pool.get_current_client().get_model_params())
llm = ManifestWrapper(
client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
)
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter
_prompt = """Write a concise summary of the following:
{text}
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(_prompt)
text_splitter = CharacterTextSplitter()
mp_chain = MapReduceChain.from_params(llm, prompt, text_splitter)
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
mp_chain.run(state_of_the_union)
from langchain.model_laboratory import ModelLaboratory
manifest1 = ManifestWrapper(
client=Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
),
llm_kwargs={"temperature": 0.01},
)
manifest2 = ManifestWrapper(
client=Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5001"
),
llm_kwargs={"temperature": 0.01},
)
manifest3 = ManifestWrapper(
client=Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5002"
),
llm_kwargs={"temperature": 0.01},
)
llms = [manifest1, manifest2, manifest3]
model_lab = ModelLaboratory(llms)
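# Hedged usage sketch (the prompt text is an assumption): compare the three
# Manifest-backed LLMs on the same input.
model_lab.compare("What color is a flamingo?")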
from typing import List
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
joke_query = "Tell me a joke."
parser = JsonOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
template="Answer the user query.\n{format_instructions}\n{query}\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})
for s in chain.stream({"query": joke_query}):
print(s)
joke_query = "Tell me a joke."
parser = JsonOutputParser()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cos-python-sdk-v5')
from langchain_community.document_loaders import TencentCOSDirectoryLoader
from qcloud_cos import CosConfig
conf = CosConfig(
Region="your cos region",
SecretId="your cos secret_id",
SecretKey="your cos secret_key",
)
loader = TencentCOSDirectoryLoader(conf=conf, bucket="you_cos_bucket")
loader.load()
loader = TencentCOSDirectoryLoader(conf=conf, bucket="you_cos_bucket", prefix="fake")
get_ipython().system('pip install -U oci')
from langchain_community.llms import OCIGenAI
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
response = llm.invoke("Tell me one fact about earth", temperature=0.7)
print(response)
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
auth_type="SECURITY_TOKEN",
auth_profile="MY_PROFILE", # replace with your profile name
model_kwargs={"temperature": 0.7, "top_p": 0.75, "max_tokens": 200},
)
prompt = PromptTemplate(input_variables=["query"], template="{query}")
llm_chain = LLMChain(llm=llm, prompt=prompt)
response = llm_chain.invoke("what is the capital of france?")
print(response)
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain_community.embeddings import OCIGenAIEmbeddings
from langchain_community.vectorstores import FAISS
embeddings = OCIGenAIEmbeddings(
model_id="MY_EMBEDDING_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
vectorstore = FAISS.from_texts(
[
"Larry Ellison co-founded Oracle Corporation in 1977 with Bob Miner and Ed Oates.",
"Oracle Corporation is an American multinational computer technology company headquartered in Austin, Texas, United States.",
],
embedding=embeddings,
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = PromptTemplate.from_template(template)
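# Hedged continuation (a sketch built only from the pieces imported above, not a
# prescribed recipe; the question string is an assumption): wire the retriever, prompt,
# and llm into a simple LCEL chain.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
print(chain.invoke("when was oracle founded?"))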
from datetime import datetime, timedelta
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, decay_rate=0.0000000000000000000000001, k=1
)
yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
[Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")
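# Note (added explanation, not in the original snippet): the retriever scores each hit as
# semantic_similarity + (1.0 - decay_rate) ** hours_passed, so with a decay_rate this
# close to zero the day-old "hello world" document stays effectively "fresh" and is
# returned ahead of "hello foo".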
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model, index, InMemoryDocstore({}), {})
from langchain.prompts import ChatMessagePromptTemplate
prompt = "May the {subject} be with you"
chat_message_prompt = ChatMessagePromptTemplate.from_template(
role="Jedi", template=prompt
)
chat_message_prompt.format(subject="force")
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
human_prompt = "Summarize our conversation so far in {word_count} words."
human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)
chat_prompt = ChatPromptTemplate.from_messages(
[MessagesPlaceholder(variable_name="conversation"), human_message_template]
)
from langchain_core.messages import AIMessage, HumanMessage
human_message = HumanMessage(content="What is the best way to learn programming?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
YBUSER = "[SANDBOX USER]"
YBPASSWORD = "[SANDBOX PASSWORD]"
YBDATABASE = "[SANDBOX_DATABASE]"
YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com"
OPENAI_API_KEY = "[OPENAI API KEY]"
import os
import pathlib
import re
import sys
import urllib.parse as urlparse
from getpass import getpass
import psycopg2
from IPython.display import Markdown, display
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain
from langchain.docstore.document import Document
from langchain_community.vectorstores import Yellowbrick
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
yellowbrick_connection_string = (
f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}"
)
YB_DOC_DATABASE = "sample_data"
YB_DOC_TABLE = "yellowbrick_documentation"
embedding_table = "my_embeddings"
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
system_template = """If you don't know the answer, make up your best guess."""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(
model_name="gpt-3.5-turbo", # Modify model_name if you have access to GPT-4
temperature=0,
max_tokens=256,
)
chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=False,
)
def print_result_simple(query):
result = chain(query)
output_text = f"""### Question:
{query}
{result['text']}
"""
display(Markdown(output_text))
print_result_simple("How many databases can be in a Yellowbrick Instance?")
print_result_simple("What's an easy way to add users in bulk to Yellowbrick?")
try:
conn = psycopg2.connect(yellowbrick_connection_string)
except psycopg2.Error as e:
print(f"Error connecting to the database: {e}")
exit(1)
cursor = conn.cursor()
create_table_query = f"""
CREATE TABLE if not exists {embedding_table} (
id uuid,
embedding_id integer,
text character varying(60000),
metadata character varying(1024),
embedding double precision
)
DISTRIBUTE ON (id);
truncate table {embedding_table};
"""
try:
cursor.execute(create_table_query)
print(f"Table '{embedding_table}' created successfully!")
except psycopg2.Error as e:
print(f"Error creating table: {e}")
conn.rollback()
conn.commit()
cursor.close()
conn.close()
yellowbrick_doc_connection_string = (
f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YB_DOC_DATABASE}"
)
conn = psycopg2.connect(yellowbrick_doc_connection_string)
cursor = conn.cursor()
query = f"SELECT path, document FROM {YB_DOC_TABLE}"
cursor.execute(query)
yellowbrick_documents = cursor.fetchall()
print(f"Extracted {len(yellowbrick_documents)} documents successfully!")
cursor.close()
conn.close()
DOCUMENT_BASE_URL = "https://docs.yellowbrick.com/6.7.1/" # Actual URL
separator = "\n## "  # This separator assumes the Markdown docs from the repo use "##" as the logical main header most of the time
chunk_size_limit = 2000
max_chunk_overlap = 200
documents = [
Document(
page_content=document[1],
metadata={"source": DOCUMENT_BASE_URL + document[0].replace(".md", ".html")},
)
for document in yellowbrick_documents
]
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size_limit,
chunk_overlap=max_chunk_overlap,
    separators=[separator, "\n\n", "\n", ",", " ", ""],
)
split_docs = text_splitter.split_documents(documents)
docs_text = [doc.page_content for doc in split_docs]
embeddings = OpenAIEmbeddings()
vector_store = Yellowbrick.from_documents(
documents=split_docs,
embedding=embeddings,
connection_string=yellowbrick_connection_string,
table=embedding_table,
)
print(f"Created vector store with {len(documents)} documents")
system_template = """Use the following pieces of context to answer the users question.
Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}"""
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointApiType,
LlamaContentFormatter,
)
from langchain_core.messages import HumanMessage
llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_type=AzureMLEndpointApiType.realtime,
endpoint_api_key="my-api-key",
content_formatter=LlamaContentFormatter(),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
)
response = llm.invoke("Write me a song about sparkling water:")
response
response = llm.invoke("Write me a song about sparkling water:", temperature=0.5)
response
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointApiType,
LlamaContentFormatter,
)
from langchain_core.messages import HumanMessage
llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/v1/completions",
endpoint_api_type=AzureMLEndpointApiType.serverless,
endpoint_api_key="my-api-key",
content_formatter=LlamaContentFormatter(),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
)
response = llm.invoke("Write me a song about sparkling water:")
response
import json
import os
from typing import Dict
from langchain_community.llms.azureml_endpoint import (
AzureMLOnlineEndpoint,
ContentFormatterBase,
)
class CustomFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps(
{
"inputs": [prompt],
"parameters": model_kwargs,
"options": {"use_cache": False, "wait_for_model": True},
}
)
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str:
response_json = json.loads(output)
return response_json[0]["summary_text"]
content_formatter = CustomFormatter()
llm = AzureMLOnlineEndpoint(
endpoint_api_type="realtime",
endpoint_api_key=os.getenv("BART_ENDPOINT_API_KEY"),
endpoint_url=os.getenv("BART_ENDPOINT_URL"),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
content_formatter=content_formatter,
)
large_text = """On January 7, 2020, Blockberry Creative announced that HaSeul would not participate in the promotion for Loona's
next album because of mental health concerns. She was said to be diagnosed with "intermittent anxiety symptoms" and would be
taking time to focus on her health.[39] On February 5, 2020, Loona released their second EP titled [#] (read as hash), along
with the title track "So What".[40] Although HaSeul did not appear in the title track, her vocals are featured on three other
songs on the album, including "365". Once peaked at number 1 on the daily Gaon Retail Album Chart,[41] the EP then debuted at
number 2 on the weekly Gaon Album Chart. On March 12, 2020, Loona won their first music show trophy with "So What" on Mnet's
M Countdown.[42]
On October 19, 2020, Loona released their third EP titled [12:00] (read as midnight),[43] accompanied by its first single
"Why Not?". HaSeul was again not involved in the album, out of her own decision to focus on the recovery of her health.[44]
The EP then became their first album to enter the Billboard 200, debuting at number 112.[45] On November 18, Loona released
the music video for "Star", another song on [12:00].[46] Peaking at number 40, "Star" is Loona's first entry on the Billboard
Mainstream Top 40, making them the second K-pop girl group to enter the chart.[47]
On June 1, 2021, Loona announced that they would be having a comeback on June 28, with their fourth EP, [&] (read as and).
[48] The following day, on June 2, a teaser was posted to Loona's official social media accounts showing twelve sets of eyes,
confirming the return of member HaSeul who had been on hiatus since early 2020.[49] On June 12, group members YeoJin, Kim Lip,
Choerry, and Go Won released the song "Yum-Yum" as a collaboration with Cocomong.[50] On September 8, they released another
collaboration song named "Yummy-Yummy".[51] On June 27, 2021, Loona announced at the end of their special clip that they are
making their Japanese debut on September 15 under Universal Music Japan sublabel EMI Records.[52] On August 27, it was announced
that Loona will release the double A-side single, "Hula Hoop / Star Seed" on September 15, with a physical CD release on October
20.[53] In December, Chuu filed an injunction to suspend her exclusive contract with Blockberry Creative.[54][55]
"""
summarized_text = llm.invoke(large_text)
print(summarized_text)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms.azureml_endpoint import DollyContentFormatter
formatter_template = "Write a {word_count} word essay about {topic}."
prompt = PromptTemplate(
input_variables=["word_count", "topic"], template=formatter_template
)
content_formatter = DollyContentFormatter()
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointApiType,
LlamaContentFormatter,
)
from langchain_core.messages import HumanMessage
llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_type=AzureMLEndpointApiType.realtime,
endpoint_api_key="my-api-key",
    content_formatter=LlamaContentFormatter(),
)
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
saver.upsert_documents(data)
saver = DatastoreSaver("Collection")
from langchain.output_parsers import XMLOutputParser
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
model = ChatAnthropic(model="claude-2", max_tokens_to_sample=512, temperature=0.1)
actor_query = "Generate the shortened filmography for Tom Hanks."
output = model.invoke(
f"""{actor_query}
Please enclose the movies in <movie></movie> tags"""
)
print(output.content)
parser = XMLOutputParser()
prompt = PromptTemplate(
template="""{query}\n{format_instructions}""",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
output = chain.invoke({"query": actor_query})
print(output)
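# Hedged addition (mirrors the streaming pattern of the JSON example earlier): the XML
# parser can also emit partial results as they stream in.
for s in chain.stream({"query": actor_query}):
    print(s)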
parser = XMLOutputParser(tags=["movies", "actor", "film", "name", "genre"])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4')
from langchain_community.document_loaders import ReadTheDocsLoader
loader = ReadTheDocsLoader("rtdocs", features="html.parser")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tigrisdb openapi-schema-pydantic langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["TIGRIS_PROJECT"] = getpass.getpass("Tigris Project Name:")
os.environ["TIGRIS_CLIENT_ID"] = getpass.getpass("Tigris Client Id:")
os.environ["TIGRIS_CLIENT_SECRET"] = getpass.getpass("Tigris Client Secret:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Tigris
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_store = Tigris.from_documents(docs, embeddings, index_name="my_embeddings")
from langchain_community.embeddings import TensorflowHubEmbeddings
embeddings = TensorflowHubEmbeddings()
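# Hedged usage sketch (the example strings are assumptions): embed a single query and a
# small batch of documents.
query_embedding = embeddings.embed_query("This is a test query")
doc_embeddings = embeddings.embed_documents(["text one", "text two"])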
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What's Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \
answer the user question and provide citations. If none of the articles answer the question, just say you don't know.
Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \
justifies the answer and the ID of the quote article. Return a citation for every quote across all articles \
that justify the answer. Use the following format for your final output:
<cited_answer>
<answer></answer>
<citations>
<citation><source_id></source_id><quote></quote></citation>
<citation><source_id></source_id><quote></quote></citation>
...
</citations>
</cited_answer>
Here are the Wikipedia articles:{context}"""
prompt_3 = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{question}")]
)
from langchain_core.output_parsers import XMLOutputParser
def format_docs_xml(docs: List[Document]) -> str:
formatted = []
for i, doc in enumerate(docs):
doc_str = f"""\
<source id=\"{i}\">
<title>{doc.metadata['title']}</title>
<article_snippet>{doc.page_content}</article_snippet>
</source>"""
formatted.append(doc_str)
return "\n\n<sources>" + "\n".join(formatted) + "</sources>"
format_3 = itemgetter("docs") | RunnableLambda(format_docs_xml)
answer_3 = prompt_3 | anthropic | XMLOutputParser() | itemgetter("cited_answer")
chain_3 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_3)
.assign(cited_answer=answer_3)
.pick(["cited_answer", "docs"])
)
chain_3.invoke("How fast are cheetahs?")
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=0,
separators=["\n\n", "\n", ".", " "],
keep_separator=False,
)
compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), k=10)
def split_and_filter(input) -> List[Document]:
docs = input["docs"]
question = input["question"]
split_docs = splitter.split_documents(docs)
stateful_docs = compressor.compress_documents(split_docs, question)
return [stateful_doc for stateful_doc in stateful_docs]
retrieve = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki) | split_and_filter
)
docs = retrieve.invoke("How fast are cheetahs?")
for doc in docs:
print(doc.page_content)
print("\n\n")
chain_4 = (
RunnableParallel(question=RunnablePassthrough(), docs=retrieve)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain_4.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class annotated_answer(BaseModel):
"""Annotate the answer to the user question with quote citations that justify the answer."""
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
llm_with_tools_5 = llm.bind_tools(
[annotated_answer],
tool_choice="annotated_answer",
)
from langchain_core.prompts import MessagesPlaceholder
prompt_5 = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
        MessagesPlaceholder("chat_history", optional=True),
    ]
)
import os
import pprint
os.environ["SERPER_API_KEY"] = ""
from langchain_community.utilities import GoogleSerperAPIWrapper
search = GoogleSerperAPIWrapper()
search.run("Obama's first name?")
os.environ["OPENAI_API_KEY"] = ""
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
search = GoogleSerperAPIWrapper()
tools = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
self_ask_with_search = initialize_agent(
tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
self_ask_with_search.run(
"What is the hometown of the reigning men's U.S. Open champion?"
)
search = GoogleSerperAPIWrapper()
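# Hedged usage sketch (the query is an assumption): .results() returns the raw JSON
# response instead of a plain-text answer.
pprint.pp(search.results("Apple Inc."))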
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the user's question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
web search engine to answer the given question, end \
the queries with ’**’. Question: \
{x} Answer:"""
rewrite_prompt = ChatPromptTemplate.from_template(template)
from langchain import hub
rewrite_prompt = hub.pull("langchain-ai/rewrite")
print(rewrite_prompt.template)
def _parse(text):
return text.strip("**")
rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse
rewriter.invoke({"x": distracted_query})
rewrite_retrieve_read_chain = (
{
"context": {"x": RunnablePassthrough()} | rewriter | retriever,
"question": RunnablePassthrough(),
}
| prompt
| model
    | StrOutputParser()
)
from typing import List
from langchain.output_parsers import YamlOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass
from datasets import (
load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio
CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
ASTRA_DB_KEYSPACE = desired_keyspace
else:
ASTRA_DB_KEYSPACE = None
import cassio
cassio.init(
database_id=ASTRA_DB_ID,
token=ASTRA_DB_APPLICATION_TOKEN,
keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
metadata = {"author": entry["author"]}
doc = Document(page_content=entry["quote"], metadata=metadata)
docs.append(doc)
inserted_ids = vstore.add_documents(docs)
print(f"\nInserted {len(inserted_ids)} documents.")
texts = ["I think, therefore I am.", "To the things themselves!"]
metadatas = [{"author": "descartes"}, {"author": "husserl"}]
ids = ["desc_01", "huss_xy"]
inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
print(f"\nInserted {len(inserted_ids_2)} documents.")
results = vstore.similarity_search("Our life is what we make of it", k=3)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
results_filtered = vstore.similarity_search(
"Our life is what we make of it",
k=3,
filter={"author": "plato"},
)
for res in results_filtered:
print(f"* {res.page_content} [{res.metadata}]")
results = vstore.similarity_search_with_score("Our life is what we make of it", k=3)
for res, score in results:
print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
results = vstore.max_marginal_relevance_search(
"Our life is what we make of it",
k=3,
filter={"author": "aristotle"},
)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
delete_1 = vstore.delete(inserted_ids[:3])
print(f"all_succeed={delete_1}") # True, all documents deleted
delete_2 = vstore.delete(inserted_ids[2:5])
print(f"some_succeeds={delete_2}") # True, though some IDs were gone already
get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"')
pdf_loader = PyPDFLoader("what-is-philosophy.pdf")
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
history = StreamlitChatMessageHistory(key="chat_messages")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
msgs = StreamlitChatMessageHistory(key="special_app_key")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet gpt4all > /dev/null')
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GPT4All
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
local_path = (
"./models/ggml-gpt4all-l13b-snoozy.bin" # replace with your desired local file path
)
callbacks = [StreamingStdOutCallbackHandler()]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
search = DuckDuckGoSearchRun()
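# Hedged usage sketch (the query is an assumption): the tool returns concatenated result snippets.
search.run("Obama's first name?")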
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.connery import ConneryService
from langchain_openai import ChatOpenAI
os.environ["CONNERY_RUNNER_URL"] = ""
os.environ["CONNERY_RUNNER_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
recipient_email = "test@example.com"
connery_service = ConneryService()
import json
from langchain.adapters.openai import convert_message_to_dict
from langchain_core.messages import AIMessage
with open("example_data/dataset_twitter-scraper_2023-08-23_22-13-19-740.json") as f:
data = json.load(f)
tweets = [d["full_text"] for d in data if "t.co" not in d["full_text"]]
messages = [AIMessage(content=t) for t in tweets]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
split_docs = text_splitter.split_documents(documents)
update_vectordb = True
split_docs[-20]
print("# docs = %d, # splits = %d" % (len(documents), len(split_docs)))
def gen_starrocks(update_vectordb, embeddings, settings):
if update_vectordb:
        docsearch = StarRocks.from_documents(split_docs, embeddings, config=settings)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
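# Hedged usage sketch (the input text is an assumption): invoke the assembled chain.
print(chain.invoke({"input": "What's your name?"}))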
get_ipython().system('pip install boto3')
from langchain_experimental.recommenders import AmazonPersonalize
recommender_arn = "<insert_arn>"
client = AmazonPersonalize(
credentials_profile_name="default",
region_name="us-west-2",
recommender_arn=recommender_arn,
)
client.get_recommendations(user_id="1")
from langchain.llms.bedrock import Bedrock
from langchain_experimental.recommenders import AmazonPersonalizeChain
bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")
chain = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=False
)
response = chain({"user_id": "1"})
print(response)
from langchain.prompts.prompt import PromptTemplate
RANDOM_PROMPT_QUERY = """
You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
The movies to recommend and their information is contained in the <movie> tag.
All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
Put the email between <email> tags.
<movie>
{result}
</movie>
Assistant:
"""
RANDOM_PROMPT = PromptTemplate(input_variables=["result"], template=RANDOM_PROMPT_QUERY)
chain = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=False, prompt_template=RANDOM_PROMPT
)
chain.run({"user_id": "1", "item_id": "234"})
from langchain.chains import LLMChain, SequentialChain
RANDOM_PROMPT_QUERY_2 = """
You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
You want the email to impress the user, so make it appealing to them.
The movies to recommend and their information is contained in the <movie> tag.
All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
Put the email between <email> tags.
<movie>
{result}
</movie>
Assistant:
"""
RANDOM_PROMPT_2 = PromptTemplate(
input_variables=["result"], template=RANDOM_PROMPT_QUERY_2
)
personalize_chain_instance = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=True
)
random_chain_instance = LLMChain(llm=bedrock_llm, prompt=RANDOM_PROMPT_2)
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile
import requests
logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("./cj/cj.pdf")
docs = loader.load()
tables = []
texts = [d.page_content for d in docs]
len(texts)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = PromptTemplate.from_template(prompt_text)
empty_response = RunnableLambda(
lambda x: AIMessage(content="Error processing document")
)
model = VertexAI(
temperature=0, model_name="gemini-pro", max_output_tokens=1024
).with_fallbacks([empty_response])
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 1})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 1})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts, tables, summarize_texts=True
)
len(text_summaries)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
    model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet flashrank')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
retriever = FAISS.from_documents(texts, embedding).as_retriever()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass
from datasets import (
load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio
CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
ASTRA_DB_KEYSPACE = desired_keyspace
else:
ASTRA_DB_KEYSPACE = None
import cassio
cassio.init(
database_id=ASTRA_DB_ID,
token=ASTRA_DB_APPLICATION_TOKEN,
keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
metadata = {"author": entry["author"]}
    doc = Document(page_content=entry["quote"], metadata=metadata)
    docs.append(doc)
import os
import comet_llm
os.environ["LANGCHAIN_COMET_TRACING"] = "true"
comet_llm.init()
os.environ["COMET_PROJECT_NAME"] = "comet-example-langchain-tracing"
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
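# Hedged continuation (a sketch; the agent below is an assumption, built only from the
# imports above): run a small math agent so the trace shows up in the Comet project.
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("What is 2 raised to .123243 power?")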
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
get_ipython().system('python3 -m spacy download en_core_web_sm')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nomic')
import time
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import AtlasDB
from langchain_text_splitters import SpacyTextSplitter
ATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redis_url = "redis://localhost:6379"
redis_url = "redis://:secret@redis:7379/2"
redis_url = "redis://joe:secret@redis/0"
redis_url = "redis+sentinel://localhost:26379"
redis_url = "redis+sentinel://joe:secret@redis"
redis_url = "redis+sentinel://redis:26379/zone-1/2"
redis_url = "rediss://localhost:6379"
redis_url = "rediss+sentinel://localhost"
metadata = [
{
"user": "john",
"age": 18,
"job": "engineer",
"credit_score": "high",
},
{
"user": "derrick",
"age": 45,
"job": "doctor",
"credit_score": "low",
},
{
"user": "nancy",
"age": 94,
"job": "doctor",
"credit_score": "high",
},
{
"user": "tyler",
"age": 100,
"job": "engineer",
"credit_score": "high",
},
{
"user": "joe",
"age": 35,
"job": "dentist",
"credit_score": "medium",
},
]
texts = ["foo", "foo", "foo", "bar", "bar"]
from langchain_community.vectorstores.redis import Redis
rds = Redis.from_texts(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users",
)
rds.index_name
get_ipython().system('rvl index listall')
get_ipython().system('rvl index info -i users')
get_ipython().system('rvl stats -i users')
results = rds.similarity_search("foo")
print(results[0].page_content)
results = rds.similarity_search("foo", k=3)
meta = results[1].metadata
print("Key of the document in Redis: ", meta.pop("id"))
print("Metadata of the document: ", meta)
results = rds.similarity_search_with_score("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Similiarity: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9)
for result in results:
print(f"Content: {result[0].page_content} --- Similarity: {result[1]}")
new_document = ["baz"]
new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}]
rds.add_texts(new_document, new_metadata)
results = rds.similarity_search("baz", k=3)
print(results[0].metadata)
results = rds.max_marginal_relevance_search("foo")
results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1)
rds.write_schema("redis_schema.yaml")
new_rds = Redis.from_existing_index(
embeddings,
index_name="users",
redis_url="redis://localhost:6379",
schema="redis_schema.yaml",
)
results = new_rds.similarity_search("foo", k=3)
print(results[0].metadata)
new_rds.schema == rds.schema
index_schema = {
"tag": [{"name": "credit_score"}],
"text": [{"name": "user"}, {"name": "job"}],
"numeric": [{"name": "age"}],
}
rds, keys = Redis.from_texts_return_keys(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users_modified",
index_schema=index_schema, # pass in the new index schema
)
from langchain_community.vectorstores.redis import RedisText
is_engineer = RedisText("job") == "engineer"
results = rds.similarity_search("foo", k=3, filter=is_engineer)
print("Job:", results[0].metadata["job"])
print("Engineers in the dataset:", len(results))
starts_with_doc = RedisText("job") % "doc*"
results = rds.similarity_search("foo", k=3, filter=starts_with_doc)
for result in results:
print("Job:", result.metadata["job"])
print("Jobs in dataset that start with 'doc':", len(results))
from langchain_community.vectorstores.redis import RedisNum
is_over_18 = RedisNum("age") > 18
is_under_99 = RedisNum("age") < 99
age_range = is_over_18 & is_under_99
results = rds.similarity_search("foo", filter=age_range)
for result in results:
print("User:", result.metadata["user"], "is", result.metadata["age"])
age_range = ( | RedisNum("age") | langchain_community.vectorstores.redis.RedisNum |
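# A hedged sketch completing the combined numeric filter started above, plus an
# optional cleanup step; the age bounds mirror the earlier example and the
# cleanup calls are an assumption about how the demo might end.
age_range = (RedisNum("age") > 18) & (RedisNum("age") < 99)
results = rds.similarity_search("foo", filter=age_range)
for result in results:
    print("User:", result.metadata["user"], "is", result.metadata["age"])

# Optional cleanup of the demo entries and index.
Redis.delete(keys, redis_url="redis://localhost:6379")
Redis.drop_index(index_name="users", delete_documents=True, redis_url="redis://localhost:6379")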
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
Tool,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.utilities import SerpAPIWrapper
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
search = | SerpAPIWrapper() | langchain_community.utilities.SerpAPIWrapper |
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_preference(self, preference, selected_meal):
if "Vegetarian" in preference:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
selected_meal = event.to_select_from["meal"][event.selected.index]
if "Tom" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
elif "Anna" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
)
random_chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default
)
for _ in range(20):
try:
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
from matplotlib import pyplot as plt
chain.metrics.to_pandas()["score"].plot(label="default learning policy")
random_chain.metrics.to_pandas()["score"].plot(label="random selection policy")
plt.legend()
print(
f"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}"
)
print(
f"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}"
)
from langchain.globals import set_debug
from langchain.prompts.prompt import PromptTemplate
set_debug(True)
REWARD_PROMPT_TEMPLATE = """
Given {preference} rank how good or bad this selection is {meal}
IMPORTANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good
"""
REWARD_PROMPT = PromptTemplate(
input_variables=["preference", "meal"],
template=REWARD_PROMPT_TEMPLATE,
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user= | rl_chain.BasedOn("Tom") | langchain_experimental.rl_chain.BasedOn |
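# A hedged sketch completing the truncated chain.run call above with the same
# arguments used earlier in this snippet.
chain.run(
    meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
)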
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
len(docs)
docs[0]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir')
from langchain_community.llms import LlamaCpp
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
f16_kv=True,  # MUST set to True, otherwise you will run into problems after a couple of calls
verbose=True,
)
llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin",
max_tokens=2048,
)
from langchain_community.llms.llamafile import Llamafile
llamafile = | Llamafile() | langchain_community.llms.llamafile.Llamafile |
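# A hedged sketch of a possible next step: pull a RAG prompt from the hub and
# answer the question with the retrieved docs using one of the local models
# above. The prompt id "rlm/rag-prompt" and the choice of llm are assumptions.
from langchain import hub
from langchain.chains.question_answering import load_qa_chain

rag_prompt = hub.pull("rlm/rag-prompt")
chain = load_qa_chain(llm, chain_type="stuff", prompt=rag_prompt)
chain({"input_documents": docs, "question": question}, return_only_outputs=True)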
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
all_documents = {
"doc1": "Climate change and economic impact.",
"doc2": "Public health concerns due to climate change.",
"doc3": "Climate change: A social perspective.",
"doc4": "Technological solutions to climate change.",
"doc5": "Policy changes needed to combat climate change.",
"doc6": "Climate change and its impact on biodiversity.",
"doc7": "Climate change: The science and models.",
"doc8": "Global warming: A subset of climate change.",
"doc9": "How climate change affects daily weather.",
"doc10": "The history of climate change activism.",
}
vectorstore = PineconeVectorStore.from_texts(
list(all_documents.values()), OpenAIEmbeddings(), index_name="rag-fusion"
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain import hub
prompt = hub.pull("langchain-ai/rag-fusion-query-generation")
generate_queries = (
prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n"))
)
original_query = "impact of climate change"
vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
from langchain.load import dumps, loads
def reciprocal_rank_fusion(results: list[list], k=60):
fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
doc_str = | dumps(doc) | langchain.load.dumps |
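# A hedged sketch of a complete reciprocal rank fusion helper consistent with the
# partial definition above, plus the fused retrieval chain; treat the scoring
# constant and the exact wiring as assumptions, not the canonical implementation.
def reciprocal_rank_fusion(results: list[list], k=60):
    fused_scores = {}
    for docs in results:
        for rank, doc in enumerate(docs):
            doc_str = dumps(doc)
            if doc_str not in fused_scores:
                fused_scores[doc_str] = 0
            fused_scores[doc_str] += 1 / (rank + k)
    return [
        (loads(doc), score)
        for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    ]

chain = generate_queries | retriever.map() | reciprocal_rank_fusion
chain.invoke({"original_query": original_query})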
from langchain_experimental.llm_bash.base import LLMBashChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain = | LLMBashChain.from_llm(llm, verbose=True) | langchain_experimental.llm_bash.base.LLMBashChain.from_llm |
from langchain.tools import BraveSearch
api_key = "API KEY"
tool = | BraveSearch.from_api_key(api_key=api_key, search_kwargs={"count": 3}) | langchain.tools.BraveSearch.from_api_key |
from typing import Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
get_ipython().run_line_magic('pip', 'install faiss-cpu > /dev/null')
get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
embeddings_model = OpenAIEmbeddings()
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain_community.utilities import SerpAPIWrapper
from langchain_openai import OpenAI
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = | ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names) | langchain.agents.ZeroShotAgent |
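# A hedged continuation sketch: wrap the agent in an executor and hand it to
# BabyAGI together with the FAISS vector store built above. The objective and
# max_iterations are illustrative.
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
OBJECTIVE = "Write a weather report for SF today"
baby_agi = BabyAGI.from_llm(
    llm=llm,
    vectorstore=vectorstore,
    task_execution_chain=agent_executor,
    verbose=False,
    max_iterations=3,
)
baby_agi({"objective": OBJECTIVE})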
from langchain_community.utils.openai_functions import (
convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
openai_functions = [convert_pydantic_to_openai_function(Joke)]
model = ChatOpenAI(temperature=0)
prompt = | ChatPromptTemplate.from_messages(
[("system", "You are helpful assistant"), ("user", "{input}") | langchain_core.prompts.ChatPromptTemplate.from_messages |
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
streaming_llm = OpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
openai_api_key=openai_api_key,
)
question_generator = | LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT) | langchain.chains.llm.LLMChain |
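# A hedged continuation sketch: pair the condense-question chain with a streaming
# QA chain so the final answer streams to stdout, following the same
# ConversationalRetrievalChain wiring used earlier in this snippet.
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
bot = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})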
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
def initialize_chain(instructions, memory=None):
if memory is None:
memory = | ConversationBufferWindowMemory() | langchain.memory.ConversationBufferWindowMemory |
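# A hedged sketch completing initialize_chain: build an LLMChain whose prompt
# embeds the instructions plus the rolling window of chat history. The template
# text is illustrative.
def initialize_chain(instructions, memory=None):
    if memory is None:
        memory = ConversationBufferWindowMemory()
    prompt = PromptTemplate(
        input_variables=["history", "human_input"],
        template=instructions + "\n\n{history}\nHuman: {human_input}\nAssistant:",
    )
    return LLMChain(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        verbose=True,
        memory=memory,
    )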
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
runnable = prompt | model
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
)
with_message_history.invoke(
{"ability": "math", "input": "What does cosine mean?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "def234"}},
)
from langchain_core.runnables import ConfigurableFieldSpec
store = {}
def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "input": "Hello"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
chain = RunnableParallel({"output_message": ChatOpenAI()})
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history,
output_messages_key="output_message",
)
with_message_history.invoke(
[HumanMessage(content="What did Simone de Beauvoir believe about free will")],
config={"configurable": {"session_id": "baz"}},
)
with_message_history.invoke(
[HumanMessage(content="How did this compare to Sartre")],
config={"configurable": {"session_id": "baz"}},
)
RunnableWithMessageHistory(
| ChatOpenAI() | langchain_openai.chat_models.ChatOpenAI |
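# A hedged sketch completing the truncated construction above: when the wrapped
# runnable consumes a list of messages and returns a message, no input or output
# keys are required. The session_id is illustrative.
runnable_with_history = RunnableWithMessageHistory(
    ChatOpenAI(),
    get_session_history,
)
runnable_with_history.invoke(
    [HumanMessage(content="What did Simone de Beauvoir believe about free will")],
    config={"configurable": {"session_id": "foo"}},
)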
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
import uuid
doc_ids = [str(uuid.uuid4()) for _ in docs]
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
sub_docs = []
for i, doc in enumerate(docs):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
retriever.vectorstore.similarity_search("justice breyer")[0]
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
from langchain.retrievers.multi_vector import SearchType
retriever.search_type = SearchType.mmr
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
import uuid
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
chain = (
{"doc": lambda x: x.page_content}
| ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
| ChatOpenAI(max_retries=0)
| StrOutputParser()
)
summaries = chain.batch(docs, {"max_concurrency": 5})
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = | InMemoryByteStore() | langchain.storage.InMemoryByteStore |
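# A hedged continuation sketch: index the generated summaries under the same
# doc_ids while keeping the full documents in the byte store, mirroring the
# retriever built earlier in this snippet.
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key=id_key,
)
summary_docs = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
sub_docs = vectorstore.similarity_search("justice breyer")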
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yfinance')
import os
os.environ["OPENAI_API_KEY"] = "..."
from langchain.agents import AgentType, initialize_agent
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = [YahooFinanceNewsTool()]
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_chain.run(
"What happens today with Microsoft stocks?",
)
agent_chain.run(
"How does Microsoft feels today comparing with Nvidia?",
)
tool = | YahooFinanceNewsTool() | langchain_community.tools.yahoo_finance_news.YahooFinanceNewsTool |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opencv-python scikit-image')
import os
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "<your-key-here>"
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["image_desc"],
template="Generate a detailed prompt to generate an image based on the following description: {image_desc}",
)
chain = | LLMChain(llm=llm, prompt=prompt) | langchain.chains.LLMChain |
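# A hedged continuation sketch: run the prompt chain and pass its output to the
# DALL-E wrapper to get back an image URL. The image description is illustrative.
image_url = DallEAPIWrapper().run(chain.run("halloween night at a haunted museum"))
print(image_url)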
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = | NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
) | langchain_community.llms.NIBittensorLLM |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redis_url = "redis://localhost:6379"
redis_url = "redis://:secret@redis:7379/2"
redis_url = "redis://joe:secret@redis/0"
redis_url = "redis+sentinel://localhost:26379"
redis_url = "redis+sentinel://joe:secret@redis"
redis_url = "redis+sentinel://redis:26379/zone-1/2"
redis_url = "rediss://localhost:6379"
redis_url = "rediss+sentinel://localhost"
metadata = [
{
"user": "john",
"age": 18,
"job": "engineer",
"credit_score": "high",
},
{
"user": "derrick",
"age": 45,
"job": "doctor",
"credit_score": "low",
},
{
"user": "nancy",
"age": 94,
"job": "doctor",
"credit_score": "high",
},
{
"user": "tyler",
"age": 100,
"job": "engineer",
"credit_score": "high",
},
{
"user": "joe",
"age": 35,
"job": "dentist",
"credit_score": "medium",
},
]
texts = ["foo", "foo", "foo", "bar", "bar"]
from langchain_community.vectorstores.redis import Redis
rds = Redis.from_texts(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users",
)
rds.index_name
get_ipython().system('rvl index listall')
get_ipython().system('rvl index info -i users')
get_ipython().system('rvl stats -i users')
results = rds.similarity_search("foo")
print(results[0].page_content)
results = rds.similarity_search("foo", k=3)
meta = results[1].metadata
print("Key of the document in Redis: ", meta.pop("id"))
print("Metadata of the document: ", meta)
results = rds.similarity_search_with_score("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Similiarity: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9)
for result in results:
print(f"Content: {result[0].page_content} --- Similarity: {result[1]}")
new_document = ["baz"]
new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}]
rds.add_texts(new_document, new_metadata)
results = rds.similarity_search("baz", k=3)
print(results[0].metadata)
results = rds.max_marginal_relevance_search("foo")
results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1)
rds.write_schema("redis_schema.yaml")
new_rds = Redis.from_existing_index(
embeddings,
index_name="users",
redis_url="redis://localhost:6379",
schema="redis_schema.yaml",
)
results = new_rds.similarity_search("foo", k=3)
print(results[0].metadata)
new_rds.schema == rds.schema
index_schema = {
"tag": [{"name": "credit_score"}],
"text": [{"name": "user"}, {"name": "job"}],
"numeric": [{"name": "age"}],
}
rds, keys = Redis.from_texts_return_keys(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users_modified",
index_schema=index_schema, # pass in the new index schema
)
from langchain_community.vectorstores.redis import RedisText
is_engineer = | RedisText("job") | langchain_community.vectorstores.redis.RedisText |
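# A hedged sketch completing the text filter started above, mirroring the filter
# example from the first Redis walkthrough in this document.
is_engineer = RedisText("job") == "engineer"
results = rds.similarity_search("foo", k=3, filter=is_engineer)
print("Job:", results[0].metadata["job"])
print("Engineers in the dataset:", len(results))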
from langchain.agents import create_spark_sql_agent
from langchain_community.agent_toolkits import SparkSQLToolkit
from langchain_community.utilities.spark_sql import SparkSQL
from langchain_openai import ChatOpenAI
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
schema = "langchain_example"
spark.sql(f"CREATE DATABASE IF NOT EXISTS {schema}")
spark.sql(f"USE {schema}")
csv_file_path = "titanic.csv"
table = "titanic"
spark.read.csv(csv_file_path, header=True, inferSchema=True).write.saveAsTable(table)
spark.table(table).show()
spark_sql = SparkSQL(schema=schema)
llm = ChatOpenAI(temperature=0)
toolkit = | SparkSQLToolkit(db=spark_sql, llm=llm) | langchain_community.agent_toolkits.SparkSQLToolkit |
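# A hedged continuation sketch: build the Spark SQL agent from the toolkit and
# ask it about the titanic table; the question is illustrative.
agent_executor = create_spark_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
agent_executor.run("Describe the titanic table")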
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
embeddings = HypotheticalDocumentEmbedder.from_llm(
multi_llm, base_embeddings, "web_search"
)
result = embeddings.embed_query("Where is the Taj Mahal?")
prompt_template = """Please answer the user's question about the most recent state of the union address
Question: {question}
Answer:"""
prompt = | PromptTemplate(input_variables=["question"], template=prompt_template) | langchain.prompts.PromptTemplate |
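# A hedged continuation sketch: use the custom prompt through an LLMChain as the
# hypothetical-document generator, then embed a query with it.
llm_chain = LLMChain(llm=llm, prompt=prompt)
embeddings = HypotheticalDocumentEmbedder(
    llm_chain=llm_chain, base_embeddings=base_embeddings
)
result = embeddings.embed_query(
    "What did the president say about Ketanji Brown Jackson"
)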
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langkit langchain-openai langchain')
from langchain.callbacks import WhyLabsCallbackHandler
from langchain_openai import OpenAI
whylabs = | WhyLabsCallbackHandler.from_params() | langchain.callbacks.WhyLabsCallbackHandler.from_params |
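# A hedged usage sketch: attach the WhyLabs callback to an LLM, generate once,
# and close the handler to flush the profile. The prompt is illustrative.
llm = OpenAI(temperature=0, callbacks=[whylabs])
result = llm.generate(["What is the capital of France?"])
print(result)
whylabs.close()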
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
vectorstore = | Chroma.from_documents(texts, embeddings, collection_name="state-of-union") | langchain_community.vectorstores.Chroma.from_documents |