get_ipython().run_line_magic('pip', 'install --upgrade --quiet airbyte-source-salesforce')
from langchain_community.document_loaders.airbyte import AirbyteSalesforceLoader
config = {
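    # your Airbyte source-salesforce configuration goes here, e.g. client_id,
    # client_secret, refresh_token, start_date (see the connector spec)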
}
loader = AirbyteSalesforceLoader(
config=config, stream_name="asset"
)  # check the Airbyte source-salesforce documentation for a list of all streams
docs = loader.load()
docs_iterator = loader.lazy_load()
from langchain.docstore.document import Document
def handle_record(record, id):
    return Document(page_content=record.data["title"], metadata=record.data)
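# Hedged usage (assumption): pass the handler so every Airbyte record is mapped onto a
# Document with a field of your choice as page_content.
loader = AirbyteSalesforceLoader(
    config=config, stream_name="asset", record_handler=handle_record
)
docs = loader.load()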
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
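# Hedged continuation (assumption): load the chunks into Chroma with the
# sentence-transformer embeddings and run a similarity search.
db = Chroma.from_documents(docs, embedding_function)
query = "What did the president say about Ketanji Brown Jackson"
print(db.similarity_search(query)[0].page_content)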
from langchain.output_parsers.enum import EnumOutputParser
from enum import Enum
class Colors(Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
parser = EnumOutputParser(enum=Colors)
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
prompt = PromptTemplate.from_template(
"""What color eyes does this person have?
> Person: {person}
Instructions: {instructions}"""
)
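# Hedged sketch (assumption): compose prompt, model, and parser; the parser's format
# instructions steer the model to answer with one of the enum values.
chain = prompt | ChatOpenAI() | parser
chain.invoke(
    {"person": "Frank Sinatra", "instructions": parser.get_format_instructions()}
)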
from getpass import getpass
WRITER_API_KEY = getpass()
import os
os.environ["WRITER_API_KEY"] = WRITER_API_KEY
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Writer
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = Writer()
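# Hedged continuation (assumption): wire the prompt and Writer model into an LLMChain and
# run it on a question, mirroring the other LLM integration examples in this file.
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")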
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters')
from langchain_text_splitters import HTMLHeaderTextSplitter
html_string = """
<!DOCTYPE html>
<html>
<body>
<div>
<h1>Foo</h1>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Baz</h2>
<p>Some text about Baz</p>
</div>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>
"""
headers_to_split_on = [
("h1", "Header 1"),
("h2", "Header 2"),
("h3", "Header 3"),
]
html_splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
html_header_splits = html_splitter.split_text(html_string)
html_header_splits
from langchain_text_splitters import RecursiveCharacterTextSplitter
url = "https://plato.stanford.edu/entries/goedel/"
headers_to_split_on = [
("h1", "Header 1"),
("h2", "Header 2"),
("h3", "Header 3"),
("h4", "Header 4"),
]
html_splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
html_header_splits = html_splitter.split_text_from_url(url)
chunk_size = 500
chunk_overlap = 30
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
splits = text_splitter.split_documents(html_header_splits)
splits[80:85]
url = "https://www.cnn.com/2023/09/25/weather/el-nino-winter-us-climate/index.html"
headers_to_split_on = [
("h1", "Header 1"),
("h2", "Header 2"),
]
html_splitter = HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
from langchain_community.document_loaders import ArcGISLoader
URL = "https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7"
loader = ArcGISLoader(URL)
docs = loader.load()
get_ipython().run_cell_magic('time', '', '\ndocs = loader.load()\n')
docs[0].metadata
loader_geom = ArcGISLoader(URL, return_geometry=True)
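# Hedged continuation (assumption): load again with geometry enabled and inspect the
# metadata, which now carries the feature geometry alongside the attributes.
docs_geom = loader_geom.load()
docs_geom[0].metadata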
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai=ChatOpenAI(),
gpt4=ChatOpenAI(model="gpt-4"),
)
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "anthropic"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0)
prompt = PromptTemplate.from_template(
"Tell me a joke about {topic}"
).configurable_alternatives(
ConfigurableField(id="prompt"),
default_key="joke",
poem=PromptTemplate.from_template("Write a short poem about {topic}"),
)
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
llm = ChatAnthropic(temperature=0)
from langchain.memory import ConversationKGMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
memory = ConversationKGMemory(llm=llm)
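# A brief usage sketch (assumption, not from the source row): the KG memory extracts
# knowledge triples from each exchange and surfaces the relevant ones on later turns.
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": "who is sam"})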
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas')
ORG_ID = "..."
import getpass
import os
from langchain.chains import RetrievalQA
from langchain.vectorstores.deeplake import DeepLake
from langchain_openai import OpenAIChat, OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
"Enter your ActiveLoop API token: "
) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
openai_embeddings = OpenAIEmbeddings()
db = DeepLake(
dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop
embedding=openai_embeddings,
runtime={"tensor_db": True},
token=token,
read_only=False,
)
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
def get_all_links(url):
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to retrieve the page: {url}")
return []
soup = BeautifulSoup(response.content, "html.parser")
links = [
urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"]
]
return links
base_url = "https://docs.deeplake.ai/en/latest/"
all_links = get_all_links(base_url)
from langchain.document_loaders import AsyncHtmlLoader
loader = AsyncHtmlLoader(all_links)
docs = loader.load()
from langchain.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
from langchain_text_splitters import RecursiveCharacterTextSplitter
chunk_size = 4096
docs_new = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
)
for doc in docs_transformed:
if len(doc.page_content) < chunk_size:
docs_new.append(doc)
else:
docs = text_splitter.create_documents([doc.page_content])
docs_new.extend(docs)
docs = db.add_documents(docs_new)
from typing import List
from langchain.chains.openai_functions import (
create_structured_output_chain,
)
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"]
ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"]
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
class Questions(BaseModel):
"""Identifying information about a person."""
question: str = Field(..., description="Questions about text")
prompt_msgs = [
SystemMessage(
content="You are a world class expert for generating questions based on provided context. \
You make sure the question can be answered by the text."
),
HumanMessagePromptTemplate.from_template(
"Use the given text to generate a question from the following input: {input}"
),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = ChatPromptTemplate(messages=prompt_msgs)
chain = create_structured_output_chain(Questions, llm, prompt, verbose=True)
text = "# Understanding Hallucinations and Bias ## **Introduction** In this lesson, we'll cover the concept of **hallucinations** in LLMs, highlighting their influence on AI applications and demonstrating how to mitigate them using techniques like the retriever's architectures. We'll also explore **bias** within LLMs with examples."
questions = chain.run(input=text)
print(questions)
import random
from langchain_openai import OpenAIEmbeddings
from tqdm import tqdm
def generate_queries(docs: List[str], ids: List[str], n: int = 100):
questions = []
relevances = []
pbar = tqdm(total=n)
while len(questions) < n:
r = random.randint(0, len(docs) - 1)
text, label = docs[r], ids[r]
generated_qs = [chain.run(input=text).question]
questions.extend(generated_qs)
relevances.extend([[(label, 1)] for _ in generated_qs])
pbar.update(len(generated_qs))
if len(questions) % 10 == 0:
print(f"q: {len(questions)}")
return questions[:n], relevances[:n]
chain = create_structured_output_chain(Questions, llm, prompt, verbose=False)
questions, relevances = generate_queries(docs, ids, n=200)
train_questions, train_relevances = questions[:100], relevances[:100]
test_questions, test_relevances = questions[100:], relevances[100:]
job_id = db.vectorstore.deep_memory.train(
queries=train_questions,
relevance=train_relevances,
)
db.vectorstore.deep_memory.status("6538939ca0b69a9ca45c528c")
recall = db.vectorstore.deep_memory.evaluate(
queries=test_questions,
relevance=test_relevances,
)
from ragas.langchain import RagasEvaluatorChain
from ragas.metrics import (
context_recall,
)
def convert_relevance_to_ground_truth(docs, relevance):
ground_truths = []
for rel in relevance:
ground_truth = []
for doc_id, _ in rel:
ground_truth.append(docs[doc_id])
ground_truths.append(ground_truth)
return ground_truths
ground_truths = convert_relevance_to_ground_truth(docs, test_relevances)
for deep_memory in [False, True]:
print("\nEvaluating with deep_memory =", deep_memory)
print("===================================")
retriever = db.as_retriever()
retriever.search_kwargs["deep_memory"] = deep_memory
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAIChat(model="gpt-3.5-turbo"),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
metrics = {
"context_recall_score": 0,
}
eval_chains = {m.name: RagasEvaluatorChain(metric=m) for m in [context_recall]}
for question, ground_truth in zip(test_questions, ground_truths):
result = qa_chain({"query": question})
result["ground_truths"] = ground_truth
for name, eval_chain in eval_chains.items():
score_name = f"{name}_score"
metrics[score_name] += eval_chain(result)[score_name]
for metric in metrics:
metrics[metric] /= len(test_questions)
print(f"{metric}: {metrics[metric]}")
print("===================================")
retriever = db.as_retriever()
retriever.search_kwargs["deep_memory"] = True
retriever.search_kwargs["k"] = 10
query = "Deamination of cytidine to uridine on the minus strand of viral DNA results in catastrophic G-to-A mutations in the viral genome."
qa = RetrievalQA.from_chain_type(
llm=OpenAIChat(model="gpt-4"), chain_type="stuff", retriever=retriever
)
print(qa.run(query))
retriever = db.as_retriever()
retriever.search_kwargs["deep_memory"] = False
retriever.search_kwargs["k"] = 10
query = "Deamination of cytidine to uridine on the minus strand of viral DNA results in catastrophic G-to-A mutations in the viral genome."
qa = RetrievalQA.from_chain_type(
    llm=OpenAIChat(model="gpt-4"), chain_type="stuff", retriever=retriever
)
print(qa.run(query))
from langchain.callbacks import get_openai_callback
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4")
with get_openai_callback() as cb:
    # the original cell is truncated here; a typical body (assumption) invokes the model
    # and reads token counts and cost from `cb`
    llm.invoke("Tell me a joke")
    print(cb)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet meilisearch')
import getpass
import os
os.environ["MEILI_HTTP_ADDR"] = getpass.getpass("Meilisearch HTTP address and port:")
os.environ["MEILI_MASTER_KEY"] = getpass.getpass("Meilisearch API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Meilisearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
embeddings = OpenAIEmbeddings()
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
vector_store = Meilisearch.from_texts(texts=texts, embedding=embeddings)
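# Hedged continuation (assumption): run a similarity search against the new index.
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_store.similarity_search(query)
print(docs[0].page_content)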
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
streaming_llm = OpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
openai_api_key=openai_api_key,
)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
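# Hedged continuation mirroring the non-streaming variant earlier in this file:
# assemble the chain so answers stream to stdout via the callback handler.
bot = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})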
get_ipython().run_line_magic('pip', 'install --upgrade --quiet googlemaps')
import os
os.environ["GPLACES_API_KEY"] = ""
from langchain.tools import GooglePlacesTool
places = GooglePlacesTool()
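# Hedged usage (assumption): query the tool directly with a free-text place search.
places.run("Pizza places near Times Square")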
import os
os.environ["LANGCHAIN_PROJECT"] = "movie-qa"
import pandas as pd
df = pd.read_csv("data/imdb_top_1000.csv")
df["Released_Year"] = df["Released_Year"].astype(int, errors="ignore")
from langchain.schema import Document
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
records = df.to_dict("records")
documents = [Document(page_content=d["Overview"], metadata=d) for d in records]
vectorstore = Chroma.from_documents(documents, embeddings)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import ChatOpenAI
metadata_field_info = [
AttributeInfo(
name="Released_Year",
description="The year the movie was released",
type="int",
),
AttributeInfo(
name="Series_Title",
description="The title of the movie",
type="str",
),
AttributeInfo(
name="Genre",
description="The genre of the movie",
type="string",
),
AttributeInfo(
name="IMDB_Rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = ChatOpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_template(
"""Answer the user's question based on the below information:
Information:
{info}
Question: {question}"""
)
generator = (prompt | ChatOpenAI() | StrOutputParser()).with_config(
run_name="generator"
)
chain = (
RunnablePassthrough.assign(info=(lambda x: x["question"]) | retriever) | generator
)
chain.invoke({"question": "what is a horror movie released in early 2000s"})
from langsmith import Client
client = Client()
runs = list(
client.list_runs(
project_name="movie-qa",
execution_order=1,
filter="and(eq(feedback_key, 'correctness'), eq(feedback_score, 1))",
)
)
len(runs)
gen_runs = []
query_runs = []
for r in runs:
gen_runs.extend(
list(
client.list_runs(
project_name="movie-qa",
filter="eq(name, 'generator')",
trace_id=r.trace_id,
)
)
)
query_runs.extend(
list(
client.list_runs(
project_name="movie-qa",
filter="eq(name, 'query_constructor')",
trace_id=r.trace_id,
)
)
)
runs[0].inputs
runs[0].outputs
query_runs[0].inputs
query_runs[0].outputs
gen_runs[0].inputs
gen_runs[0].outputs
client.create_dataset("movie-query_constructor")
inputs = [r.inputs for r in query_runs]
outputs = [r.outputs for r in query_runs]
client.create_examples(
inputs=inputs, outputs=outputs, dataset_name="movie-query_constructor"
)
client.create_dataset("movie-generator")
inputs = [r.inputs for r in gen_runs]
outputs = [r.outputs for r in gen_runs]
client.create_examples(inputs=inputs, outputs=outputs, dataset_name="movie-generator")
examples = list(client.list_examples(dataset_name="movie-query_constructor"))
import json
def filter_to_string(_filter):
if "operator" in _filter:
args = [filter_to_string(f) for f in _filter["arguments"]]
return f"{_filter['operator']}({','.join(args)})"
else:
comparator = _filter["comparator"]
attribute = json.dumps(_filter["attribute"])
value = json.dumps(_filter["value"])
return f"{comparator}({attribute}, {value})"
model_examples = []
for e in examples:
if "filter" in e.outputs["output"]:
string_filter = filter_to_string(e.outputs["output"]["filter"])
else:
string_filter = "NO_FILTER"
model_examples.append(
(
e.inputs["query"],
{"query": e.outputs["output"]["query"], "filter": string_filter},
)
)
retriever1 = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
verbose=True,
chain_kwargs={"examples": model_examples},
)
chain1 = (
    RunnablePassthrough.assign(info=(lambda x: x["question"]) | retriever1) | generator
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
get_ipython().system('pip install sqlalchemy')
get_ipython().system('pip install langchain')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores.apache_doris import (
ApacheDoris,
ApacheDorisSettings,
)
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
split_docs = text_splitter.split_documents(documents)
update_vectordb = True
def gen_apache_doris(update_vectordb, embeddings, settings):
    if update_vectordb:
        docsearch = ApacheDoris.from_documents(split_docs, embeddings, config=settings)
    else:
        docsearch = ApacheDoris(embeddings, settings)
    return docsearch
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
HumanMessage(content="I love programming."),
]
print(chat(messages))
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="What is a good name for a company that makes {product}?",
input_variables=["product"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
callback = ContextCallbackHandler(token)
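# Hedged continuation (assumption): attach the callback to both the chat model and the
# chain so the Context analytics capture the full run.
chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
chain = LLMChain(llm=chat, prompt=chat_prompt_template, callbacks=[callback])
print(chain.run("colorful socks"))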
get_ipython().run_cell_magic('writefile', 'telegram_conversation.json', '{\n "name": "Jiminy",\n "type": "personal_chat",\n "id": 5965280513,\n "messages": [\n {\n "id": 1,\n "type": "message",\n "date": "2023-08-23T13:11:23",\n "date_unixtime": "1692821483",\n "from": "Jiminy Cricket",\n "from_id": "user123450513",\n "text": "You better trust your conscience",\n "text_entities": [\n {\n "type": "plain",\n "text": "You better trust your conscience"\n }\n ]\n },\n {\n "id": 2,\n "type": "message",\n "date": "2023-08-23T13:13:20",\n "date_unixtime": "1692821600",\n "from": "Batman & Robin",\n "from_id": "user6565661032",\n "text": "What did you just say?",\n "text_entities": [\n {\n "type": "plain",\n "text": "What did you just say?"\n }\n ]\n }\n ]\n}\n')
from langchain_community.chat_loaders.telegram import TelegramChatLoader
loader = TelegramChatLoader(
path="./telegram_conversation.json",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
messages: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="Jiminy Cricket")
)
import os
os.environ["LANGCHAIN_PROJECT"] = "movie-qa"
import pandas as pd
df = pd.read_csv("data/imdb_top_1000.csv")
df["Released_Year"] = df["Released_Year"].astype(int, errors="ignore")
from langchain.schema import Document
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
records = df.to_dict("records")
documents = [Document(page_content=d["Overview"], metadata=d) for d in records]
vectorstore = Chroma.from_documents(documents, embeddings)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
import os
import pprint
os.environ["SERPER_API_KEY"] = ""
from langchain_community.utilities import GoogleSerperAPIWrapper
search = GoogleSerperAPIWrapper()
search.run("Obama's first name?")
os.environ["OPENAI_API_KEY"] = ""
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
search = GoogleSerperAPIWrapper()
tools = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
self_ask_with_search = initialize_agent(
tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
self_ask_with_search.run(
"What is the hometown of the reigning men's U.S. Open champion?"
)
search = GoogleSerperAPIWrapper()
results = search.results("Apple Inc.")
pprint.pp(results)
search = GoogleSerperAPIWrapper(type="images")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import PredictionGuard
os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
pgllm("Tell me a joke")
template = """Respond to the following query based on the context.
Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦
Exclusive Candle Box - $80
Monthly Candle Box - $45 (NEW!)
Scent of The Month Box - $28 (NEW!)
Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
Query: {query}
Result: """
prompt = PromptTemplate.from_template(template)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(
model="OpenAI-text-davinci-003",
output={
"type": "categorical",
"categories": ["product announcement", "apology", "relational"],
},
)
pgllm(prompt.format(query="What kind of post is this?"))
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)
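# Hedged continuation (assumption): run the chain on a sample question.
llm_chain.predict(question="What NFL team won the Super Bowl in the year Justin Bieber was born?")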
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
ticker = "AAPL"
last_quote_tool = PolygonLastQuote(api_wrapper=api_wrapper)
last_quote = last_quote_tool.run(ticker)
print(f"Tool output: {last_quote}")
import json
last_quote = last_quote_tool.run(ticker)
last_quote_json = json.loads(last_quote)
latest_price = last_quote_json["p"]
print(f"Latest price for {ticker} is ${latest_price}")
ticker_news_tool = PolygonTickerNews(api_wrapper=api_wrapper)
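# Hedged continuation (assumption): fetch recent news for the ticker and count the items.
ticker_news = ticker_news_tool.run(ticker)
print(f"Total news items: {len(json.loads(ticker_news))}")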
import os
os.environ["GOOGLE_CSE_ID"] = ""
os.environ["GOOGLE_API_KEY"] = ""
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tool = Tool(
name="google_search",
description="Search Google for recent results.",
func=search.run,
)
tool.run("Obama's first name?")
search = GoogleSearchAPIWrapper(k=1)
tool = Tool(
name="I'm Feeling Lucky",
description="Search Google and return the first result.",
func=search.run,
)
tool.run("python")
search = GoogleSearchAPIWrapper()
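# Hedged sketch (assumption): wrap a multi-result search as a tool that returns the top
# five hits as structured results instead of a single snippet string.
def top5_results(query):
    return search.results(query, 5)
tool = Tool(
    name="Google Search Snippets",
    description="Search Google for recent results.",
    func=top5_results,
)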
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
evaluator = load_evaluator("labeled_criteria", criteria="correctness")
eval_result = evaluator.evaluate_strings(
input="What is the capital of the US?",
prediction="Topeka, KS",
reference="The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023",
)
print(f'With ground truth: {eval_result["score"]}')
from langchain.evaluation import Criteria
list(Criteria)
custom_criterion = {
"numeric": "Does the output contain numeric or mathematical information?"
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criterion,
)
query = "Tell me a joke"
prediction = "I ate some square pie but I don't know the square of pi."
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print(eval_result)
custom_criteria = {
"numeric": "Does the output contain numeric information?",
"mathematical": "Does the output contain mathematical information?",
"grammatical": "Is the output grammatically correct?",
"logical": "Is the output logical?",
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criteria,
)
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print("Multi-criteria evaluation")
print(eval_result)
from langchain.chains.constitutional_ai.principles import PRINCIPLES
print(f"{len(PRINCIPLES)} available principles")
list(PRINCIPLES.items())
STAGE_BUCKET = "<bucket-name>"
get_ipython().run_cell_magic('bash', ' -s "$STAGE_BUCKET"', '\nrm -rf data\nmkdir -p data\ncd data\necho getting org ontology and sample org instances\nwget http://www.w3.org/ns/org.ttl \nwget https://raw.githubusercontent.com/aws-samples/amazon-neptune-ontology-example-blog/main/data/example_org.ttl \n\necho Copying org ttl to S3\naws s3 cp org.ttl s3://$1/org.ttl\naws s3 cp example_org.ttl s3://$1/example_org.ttl\n')
get_ipython().run_line_magic('load', '-s s3://{STAGE_BUCKET} -f turtle --store-to loadres --run')
get_ipython().run_line_magic('load_status', "{loadres['payload']['loadId']} --errors --details")
EXAMPLES = """
<question>
Find organizations.
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName where {{
?org rdfs:label ?orgName .
}}
</sparql>
<question>
Find sites of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?siteName where {{
?org rdfs:label ?orgName .
?org org:hasSite/rdfs:label ?siteName .
}}
</sparql>
<question>
Find suborganizations of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?subName where {{
?org rdfs:label ?orgName .
?org org:hasSubOrganization/rdfs:label ?subName .
}}
</sparql>
<question>
Find organizational units of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?unitName where {{
?org rdfs:label ?orgName .
?org org:hasUnit/rdfs:label ?unitName .
}}
</sparql>
<question>
Find members of an organization. Also find their manager, or the member they report to.
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX org: <http://www.w3.org/ns/org#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
select * where {{
?person rdf:type foaf:Person .
?person org:memberOf ?org .
OPTIONAL {{ ?person foaf:firstName ?firstName . }}
OPTIONAL {{ ?person foaf:family_name ?lastName . }}
OPTIONAL {{ ?person org:reportsTo ?manager }} .
}}
</sparql>
<question>
Find change events, such as mergers and acquisitions, of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?event ?prop ?obj where {{
?org rdfs:label ?orgName .
?event rdf:type org:ChangeEvent .
?event org:originalOrganization ?origOrg .
?event org:resultingOrganization ?resultingOrg .
}}
</sparql>
"""
import boto3
from langchain.chains.graph_qa.neptune_sparql import NeptuneSparqlQAChain
from langchain_community.chat_models import BedrockChat
from langchain_community.graphs import NeptuneRdfGraph
host = "<neptune-host>"
port = "<neptune-port>"
region = "us-east-1" # specify region
graph = NeptuneRdfGraph(
host=host, port=port, use_iam_auth=True, region_name=region, hide_comments=True
)
schema_elements = graph.get_schema_elements
graph.load_from_schema_elements(schema_elements)
bedrock_client = boto3.client("bedrock-runtime")
llm = BedrockChat(model_id="anthropic.claude-v2", client=bedrock_client)
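# Hedged continuation (assumption): build the SPARQL QA chain with the few-shot examples
# above and ask a question against the Neptune RDF graph.
chain = NeptuneSparqlQAChain.from_llm(
    llm=llm, graph=graph, examples=EXAMPLES, verbose=True
)
chain.invoke("How many organizations are in the graph?")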
get_ipython().run_line_magic('pip', 'install --upgrade --quiet banana-dev')
import os
os.environ["BANANA_API_KEY"] = "YOUR_API_KEY"
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Banana
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = Banana(model_key="YOUR_MODEL_KEY", model_url_slug="YOUR_MODEL_URL_SLUG")
get_ipython().system('pip install --quiet langchain_experimental langchain_openai')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
text_splitter = SemanticChunker(OpenAIEmbeddings())
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
text_splitter = SemanticChunker(
OpenAIEmbeddings(), breakpoint_threshold_type="percentile"
)
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
print(len(docs))
text_splitter = SemanticChunker(
OpenAIEmbeddings(), breakpoint_threshold_type="standard_deviation"
)
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
print(len(docs))
text_splitter = SemanticChunker(
    OpenAIEmbeddings()
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
)
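# Hedged continuation (assumption, not in the source row): pipe the parsed arguments
# straight into the tool and invoke the whole chain with natural language.
chain = chain | complex_tool
chain.invoke("use complex tool. the args are 5, 2.1, empty dictionary")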
from langchain.chains import LLMMathChain
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.tools import Tool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from langchain_openai import ChatOpenAI, OpenAI
search = DuckDuckGoSearchAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
model = ChatOpenAI(temperature=0)
from ray import serve
from starlette.requests import Request
@serve.deployment
class LLMServe:
def __init__(self) -> None:
pass
async def __call__(self, request: Request) -> str:
return "Hello World"
deployment = LLMServe.bind()
serve.api.run(deployment)
serve.api.shutdown()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from getpass import getpass
OPENAI_API_KEY = getpass()
@serve.deployment
class DeployLLM:
def __init__(self):
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
template = "Question: {question}\n\nAnswer: Let's think step by step."
        prompt = PromptTemplate.from_template(template)
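        # Hedged completion (assumption, the original cell is truncated here): wrap the
        # prompt and LLM in a chain and serve it from the deployment's __call__.
        self.chain = LLMChain(llm=llm, prompt=prompt)
    async def __call__(self, request: Request) -> str:
        text = request.query_params["text"]
        return self.chain.run(text)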
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
print(few_shot_prompt.format())
final_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a wondrous wizard of math."),
few_shot_prompt,
("human", "{input}"),
]
)
from langchain_community.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic(temperature=0.0)
chain.invoke({"input": "What's the square of a triangle?"})
from langchain.prompts import SemanticSimilarityExampleSelector
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
{"input": "2+4", "output": "6"},
{"input": "What did the cow say to the moon?", "output": "nothing at all"},
{
"input": "Write me a poem about the moon",
"output": "One for the moon, and one for me, who are we to talk about the moon?",
},
]
to_vectorize = [" ".join(example.values()) for example in examples]
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_texts(to_vectorize, embeddings, metadatas=examples)
example_selector = SemanticSimilarityExampleSelector(
vectorstore=vectorstore,
k=2,
)
example_selector.select_examples({"input": "horse"})
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
input_variables=["input"],
example_selector=example_selector,
    example_prompt=ChatPromptTemplate.from_messages(
        [("human", "{input}"), ("ai", "{output}")]
    ),
)
from langchain_community.graphs import NeptuneGraph
host = "<neptune-host>"
port = 8182
use_https = True
graph = NeptuneGraph(host=host, port=port, use_https=use_https)
from langchain.chains import NeptuneOpenCypherQAChain
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-4")
chain = NeptuneOpenCypherQAChain.from_llm(llm=llm, graph=graph)
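# Hedged usage (assumption): ask a natural-language question; the chain generates
# openCypher against the Neptune graph and answers from the query results.
chain.invoke("How many outgoing routes does the Austin airport have?")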
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiledb-vector-search')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import TileDB
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
embeddings = HuggingFaceEmbeddings()
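# Hedged continuation (assumption, parameter names may differ by version): index the
# split documents in a local TileDB array and run a similarity search.
db = TileDB.from_documents(
    documents, embeddings, index_uri="/tmp/tiledb_index", index_type="FLAT"
)
db.similarity_search("What did the president say about Ketanji Brown Jackson")[0]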
from typing import Any, Dict, List
from langchain.chains import ConversationChain
from langchain.schema import BaseMemory
from langchain_openai import OpenAI
from pydantic import BaseModel
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
import spacy
nlp = spacy.load("en_core_web_lg")
class SpacyEntityMemory(BaseMemory, BaseModel):
"""Memory class for storing information about entities."""
entities: dict = {}
memory_key: str = "entities"
def clear(self):
self.entities = {}
@property
def memory_variables(self) -> List[str]:
"""Define the variables we are providing to the prompt."""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load the memory variables, in this case the entity key."""
doc = nlp(inputs[list(inputs.keys())[0]])
entities = [
self.entities[str(ent)] for ent in doc.ents if str(ent) in self.entities
]
return {self.memory_key: "\n".join(entities)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
text = inputs[list(inputs.keys())[0]]
doc = nlp(text)
for ent in doc.ents:
ent_str = str(ent)
if ent_str in self.entities:
self.entities[ent_str] += f"\n{text}"
else:
self.entities[ent_str] = text
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. You are provided with information about entities the Human mentions, if relevant.
Relevant entity information:
{entities}
Conversation:
Human: {input}
AI:"""
prompt = PromptTemplate(input_variables=["entities", "input"], template=template)
get_ipython().system('pip install --quiet langchain_experimental langchain_openai')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
text_splitter = SemanticChunker(OpenAIEmbeddings())
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import StripeLoader
stripe_loader = StripeLoader("charges")
index = VectorstoreIndexCreator()
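# Hedged continuation (assumption): feed the Stripe loader into the index creator and
# expose the resulting vector store as a retriever.
stripe_index = index.from_loaders([stripe_loader])
stripe_doc_retriever = stripe_index.vectorstore.as_retriever()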
REGION = "us-central1" # @param {type:"string"}
INSTANCE = "test-instance" # @param {type:"string"}
DB_USER = "sqlserver" # @param {type:"string"}
DB_PASS = "password" # @param {type:"string"}
DATABASE = "test" # @param {type:"string"}
TABLE_NAME = "test-default" # @param {type:"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mssql')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
from langchain_google_cloud_sql_mssql import MSSQLEngine
engine = MSSQLEngine.from_instance(
project_id=PROJECT_ID,
region=REGION,
instance=INSTANCE,
database=DATABASE,
user=DB_USER,
password=DB_PASS,
)
engine.init_document_table(TABLE_NAME, overwrite_existing=True)
from langchain_core.documents import Document
from langchain_google_cloud_sql_mssql import MSSQLDocumentSaver
test_docs = [
Document(
page_content="Apple Granny Smith 150 0.99 1",
metadata={"fruit_id": 1},
),
Document(
page_content="Banana Cavendish 200 0.59 0",
metadata={"fruit_id": 2},
),
Document(
page_content="Orange Navel 80 1.29 1",
metadata={"fruit_id": 3},
),
]
saver = MSSQLDocumentSaver(engine=engine, table_name=TABLE_NAME)
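# Hedged continuation (assumption): persist the sample documents into the MSSQL table.
saver.add_documents(test_docs)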
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scann')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from langchain_community.document_loaders import IFixitLoader
loader = IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811")
data = loader.load()
data
loader = IFixitLoader(
    "https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself"
)
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain_community.document_loaders.blob_loaders.youtube_audio import (
YoutubeAudioLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import (
OpenAIWhisperParser,
OpenAIWhisperParserLocal,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yt_dlp')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pydub')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet librosa')
local = False
urls = ["https://youtu.be/kCc8FmEb1nY", "https://youtu.be/VMj-3S1tku0"]
save_dir = "~/Downloads/YouTube"
if local:
loader = GenericLoader(
YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParserLocal()
)
else:
loader = GenericLoader(YoutubeAudioLoader(urls, save_dir), OpenAIWhisperParser())
docs = loader.load()
docs[0].page_content[0:500]
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
combined_docs = [doc.page_content for doc in docs]
text = " ".join(combined_docs)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
splits = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
vectordb = FAISS.from_texts(splits, embeddings)
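# Hedged continuation (assumption): build a RetrievalQA chain over the transcript index
# and ask a question about the lecture content.
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
)
qa_chain.run("Why do we need to zero out the gradient before backprop at each step?")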
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.tools.you import YouSearchTool
from langchain_community.utilities.you import YouSearchAPIWrapper
api_wrapper = YouSearchAPIWrapper(num_web_results=1)
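# Hedged continuation (assumption): wrap the API in the search tool and run a query.
tool = YouSearchTool(api_wrapper=api_wrapper)
tool.invoke("What is the weather in NY")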
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
    HumanMessage(content="I love programming."),
]
print(chat(messages))
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sqlite-vss')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = SQLiteVSS.from_texts(
texts=texts,
embedding=embedding_function,
table="state_union",
db_file="/tmp/vss.db",
)
query = "What did the president say about Ketanji Brown Jackson"
data = db.similarity_search(query)
data[0].page_content
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
get_ipython().system('pip install -U oci')
from langchain_community.llms import OCIGenAI
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
response = llm.invoke("Tell me one fact about earth", temperature=0.7)
print(response)
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
auth_type="SECURITY_TOKEN",
auth_profile="MY_PROFILE", # replace with your profile name
model_kwargs={"temperature": 0.7, "top_p": 0.75, "max_tokens": 200},
)
prompt = PromptTemplate(input_variables=["query"], template="{query}")
llm_chain = LLMChain(llm=llm, prompt=prompt)
response = llm_chain.invoke("what is the capital of france?")
print(response)
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain_community.embeddings import OCIGenAIEmbeddings
from langchain_community.vectorstores import FAISS
embeddings = OCIGenAIEmbeddings(
model_id="MY_EMBEDDING_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
vectorstore = FAISS.from_texts(
[
"Larry Ellison co-founded Oracle Corporation in 1977 with Bob Miner and Ed Oates.",
"Oracle Corporation is an American multinational computer technology company headquartered in Austin, Texas, United States.",
],
embedding=embeddings,
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = PromptTemplate.from_template(template)
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
chain = (
{"context": retriever, "question": | RunnablePassthrough() | langchain.schema.runnable.RunnablePassthrough |
from langchain.chains import LLMMathChain
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.tools import Tool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from langchain_openai import ChatOpenAI, OpenAI
search = DuckDuckGoSearchAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = | load_agent_executor(model, tools, verbose=True) | langchain_experimental.plan_and_execute.load_agent_executor |
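# Illustrative continuation (assumption): combine the planner and executor into a
# PlanAndExecute agent and run a sample multi-step question.
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
agent.run(
    "Who is the current prime minister of the UK? What is their current age raised to the 0.43 power?"
)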
import kuzu
db = kuzu.Database("test_db")
conn = kuzu.Connection(db)
conn.execute("CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))")
conn.execute(
"CREATE NODE TABLE Person (name STRING, birthDate STRING, PRIMARY KEY(name))"
)
conn.execute("CREATE REL TABLE ActedIn (FROM Person TO Movie)")
conn.execute("CREATE (:Person {name: 'Al Pacino', birthDate: '1940-04-25'})")
conn.execute("CREATE (:Person {name: 'Robert De Niro', birthDate: '1943-08-17'})")
conn.execute("CREATE (:Movie {name: 'The Godfather'})")
conn.execute("CREATE (:Movie {name: 'The Godfather: Part II'})")
conn.execute(
"CREATE (:Movie {name: 'The Godfather Coda: The Death of Michael Corleone'})"
)
conn.execute(
"MATCH (p:Person), (m:Movie) WHERE p.name = 'Al Pacino' AND m.name = 'The Godfather' CREATE (p)-[:ActedIn]->(m)"
)
conn.execute(
"MATCH (p:Person), (m:Movie) WHERE p.name = 'Al Pacino' AND m.name = 'The Godfather: Part II' CREATE (p)-[:ActedIn]->(m)"
)
conn.execute(
"MATCH (p:Person), (m:Movie) WHERE p.name = 'Al Pacino' AND m.name = 'The Godfather Coda: The Death of Michael Corleone' CREATE (p)-[:ActedIn]->(m)"
)
conn.execute(
"MATCH (p:Person), (m:Movie) WHERE p.name = 'Robert De Niro' AND m.name = 'The Godfather: Part II' CREATE (p)-[:ActedIn]->(m)"
)
from langchain.chains import KuzuQAChain
from langchain_community.graphs import KuzuGraph
from langchain_openai import ChatOpenAI
graph = | KuzuGraph(db) | langchain_community.graphs.KuzuGraph |
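# Illustrative continuation (assumption): build the QA chain over the Kùzu graph created
# above and query the inserted movie data.
chain = KuzuQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
chain.run("Who acted in The Godfather: Part II?")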
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
HumanMessage(content="I love programming."),
]
print(chat(messages))
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
human_message_prompt = HumanMessagePromptTemplate(
prompt=PromptTemplate(
template="What is a good name for a company that makes {product}?",
input_variables=["product"],
)
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
callback = ContextCallbackHandler(token)
chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
chain = | LLMChain(llm=chat, prompt=chat_prompt_template, callbacks=[callback]) | langchain.chains.LLMChain |
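# Illustrative continuation (assumption): run the chain so the Context callback records
# the prompt/completion pair.
print(chain.run("colorful socks"))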
from typing import Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
get_ipython().run_line_magic('pip', 'install faiss-cpu > /dev/null')
get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
embeddings_model = OpenAIEmbeddings()
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain_community.utilities import SerpAPIWrapper
from langchain_openai import OpenAI
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = | SerpAPIWrapper() | langchain_community.utilities.SerpAPIWrapper |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = | TokenTextSplitter(chunk_size=10, chunk_overlap=0) | langchain_text_splitters.TokenTextSplitter |
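# Illustrative continuation (assumption): split the loaded text with the token-based
# splitter configured above and inspect the first chunk.
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])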
get_ipython().run_line_magic('pip', 'install --upgrade --quiet semanticscholar')
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an expert researcher."""
base_prompt = hub.pull("langchain-ai/openai-functions-template")
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
from langchain_community.tools.semanticscholar.tool import SemanticScholarQueryRun
tools = [ | SemanticScholarQueryRun() | langchain_community.tools.semanticscholar.tool.SemanticScholarQueryRun |
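# Illustrative continuation (assumption): assemble an OpenAI-functions agent around the
# Semantic Scholar tool and ask it a research question.
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke(
    {
        "input": "What are some biases in large language models? How have people tried to mitigate them?"
    }
)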
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@tool("search-tool", args_schema=SearchInput, return_direct=True)
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
print(search.return_direct)
from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
class CalculatorInput(BaseModel):
a: int = | Field(description="first number") | langchain.pydantic_v1.Field |
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_community.llms import TitanTakeoffPro
llm = TitanTakeoffPro()
output = llm("What is the weather in London in August?")
print(output)
llm = TitanTakeoffPro(
base_url="http://localhost:3000",
min_new_tokens=128,
max_new_tokens=512,
no_repeat_ngram_size=2,
sampling_topk=1,
sampling_topp=1.0,
sampling_temperature=1.0,
repetition_penalty=1.0,
regex_string="",
)
output = llm("What is the largest rainforest in the world?")
print(output)
llm = TitanTakeoffPro()
rich_output = llm.generate(["What is Deep Learning?", "What is Machine Learning?"])
print(rich_output.generations)
llm = TitanTakeoffPro(
streaming=True, callback_manager=CallbackManager([ | StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_finance import GoogleFinanceQueryRun
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
os.environ["SERPAPI_API_KEY"] = ""
tool = GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper())
tool.run("Google")
import os
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERP_API_KEY"] = ""
llm = | OpenAI() | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet trubrics')
import os
os.environ["TRUBRICS_EMAIL"] = "***@***"
os.environ["TRUBRICS_PASSWORD"] = "***"
os.environ["OPENAI_API_KEY"] = "sk-***"
from langchain.callbacks import TrubricsCallbackHandler
from langchain_openai import OpenAI
llm = OpenAI(callbacks=[ | TrubricsCallbackHandler() | langchain.callbacks.TrubricsCallbackHandler |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet annoy')
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Annoy
embeddings_func = | HuggingFaceEmbeddings() | langchain_community.embeddings.HuggingFaceEmbeddings |
REGION = "us-central1" # @param {type:"string"}
INSTANCE = "test-instance" # @param {type:"string"}
DB_USER = "sqlserver" # @param {type:"string"}
DB_PASS = "password" # @param {type:"string"}
DATABASE = "test" # @param {type:"string"}
TABLE_NAME = "test-default" # @param {type:"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mssql')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
from langchain_google_cloud_sql_mssql import MSSQLEngine
engine = MSSQLEngine.from_instance(
project_id=PROJECT_ID,
region=REGION,
instance=INSTANCE,
database=DATABASE,
user=DB_USER,
password=DB_PASS,
)
engine.init_document_table(TABLE_NAME, overwrite_existing=True)
from langchain_core.documents import Document
from langchain_google_cloud_sql_mssql import MSSQLDocumentSaver
test_docs = [
Document(
page_content="Apple Granny Smith 150 0.99 1",
metadata={"fruit_id": 1},
),
Document(
page_content="Banana Cavendish 200 0.59 0",
metadata={"fruit_id": 2},
),
Document(
page_content="Orange Navel 80 1.29 1",
metadata={"fruit_id": 3},
),
]
saver = MSSQLDocumentSaver(engine=engine, table_name=TABLE_NAME)
saver.add_documents(test_docs)
from langchain_google_cloud_sql_mssql import MSSQLLoader
loader = MSSQLLoader(engine=engine, table_name=TABLE_NAME)
docs = loader.lazy_load()
for doc in docs:
print("Loaded documents:", doc)
from langchain_google_cloud_sql_mssql import MSSQLLoader
loader = MSSQLLoader(
engine=engine,
query=f"select * from \"{TABLE_NAME}\" where JSON_VALUE(langchain_metadata, '$.fruit_id') = 1;",
)
onedoc = loader.load()
onedoc
from langchain_google_cloud_sql_mssql import MSSQLLoader
loader = | MSSQLLoader(engine=engine, table_name=TABLE_NAME) | langchain_google_cloud_sql_mssql.MSSQLLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lxml')
from langchain_community.agent_toolkits import PlayWrightBrowserToolkit
from langchain_community.tools.playwright.utils import (
create_async_playwright_browser,  # A synchronous browser is available, though it isn't compatible with jupyter.
)
import nest_asyncio
nest_asyncio.apply()
async_browser = | create_async_playwright_browser() | langchain_community.tools.playwright.utils.create_async_playwright_browser |
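# Illustrative continuation (assumption): build the toolkit from the async browser and
# list the browsing tools it exposes.
toolkit = PlayWrightBrowserToolkit.from_browser(async_browser=async_browser)
tools = toolkit.get_tools()
print([tool.name for tool in tools])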
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
tools = toolkit.get_tools()
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
print("Available tools:")
for tool in tools:
print("\t" + tool.name)
agent.run(
"You have the software engineering capabilities of a Google Principle engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them."
)
from langchain import hub
gh_issue_prompt_template = hub.pull("kastanday/new-github-issue")
print(gh_issue_prompt_template.template)
def format_issue(issue):
title = f"Title: {issue.get('title')}."
opened_by = f"Opened by user: {issue.get('opened_by')}"
body = f"Body: {issue.get('body')}"
comments = issue.get("comments") # often too long
return "\n".join([title, opened_by, body])
issue = github.get_issue(33) # task to implement a RNA-seq pipeline (bioinformatics)
final_gh_issue_prompt = gh_issue_prompt_template.format(
issue_description=format_issue(issue)
)
print(final_gh_issue_prompt)
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from langchain_core.prompts.chat import MessagesPlaceholder
summarizer_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # type: ignore
chat_history = MessagesPlaceholder(variable_name="chat_history")
memory = ConversationSummaryBufferMemory(
memory_key="chat_history",
return_messages=True,
llm=summarizer_llm,
max_token_limit=2_000,
)
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True, # or pass a function that accepts the error and returns a string
max_iterations=30,
max_execution_time=None,
early_stopping_method="generate",
memory=memory,
agent_kwargs={
"memory_prompts": [chat_history],
"input_variables": ["input", "agent_scratchpad", "chat_history"],
"prefix": final_gh_issue_prompt,
},
)
from langchain_core.tracers.context import tracing_v2_enabled
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "ls__......"
os.environ["LANGCHAIN_PROJECT"] = "Github_Demo_PR"
os.environ["LANGCHAIN_WANDB_TRACING"] = "false"
with tracing_v2_enabled(project_name="Github_Demo_PR", tags=["PR_bot"]) as cb:
agent.run(final_gh_issue_prompt)
from langchain.tools.render import render_text_description_and_args
print(render_text_description_and_args(tools))
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckduckgo-search')
from langchain.agents import Tool
from langchain.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI
tools = []
unwanted_tools = ["Get Issue", "Delete File", "Create File", "Create Pull Request"]
for tool in toolkit.get_tools():
if tool.name not in unwanted_tools:
tools.append(tool)
tools += [
Tool(
name="Search",
func= | DuckDuckGoSearchRun() | langchain.tools.DuckDuckGoSearchRun |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet O365')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 # This is optional but is useful for parsing HTML messages')
from langchain_community.agent_toolkits import O365Toolkit
toolkit = | O365Toolkit() | langchain_community.agent_toolkits.O365Toolkit |
get_ipython().system(' nomic login')
get_ipython().system(' nomic login token')
get_ipython().system(' pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain')
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "api_key"
from langchain_community.document_loaders import WebBaseLoader
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]
docs = [ | WebBaseLoader(url) | langchain_community.document_loaders.WebBaseLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rank_bm25 > /dev/null')
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
doc_list_1 = [
"I like apples",
"I like oranges",
"Apples and oranges are fruits",
]
bm25_retriever = BM25Retriever.from_texts(
doc_list_1, metadatas=[{"source": 1}] * len(doc_list_1)
)
bm25_retriever.k = 2
doc_list_2 = [
"You like apples",
"You like oranges",
]
embedding = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
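# Illustrative continuation (assumption): pair the keyword-based BM25 retriever with a
# dense FAISS retriever in an ensemble and query it.
faiss_vectorstore = FAISS.from_texts(
    doc_list_2, embedding, metadatas=[{"source": 2}] * len(doc_list_2)
)
faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={"k": 2})
ensemble_retriever = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
ensemble_retriever.get_relevant_documents("apples")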
from langchain_community.tools.edenai import (
EdenAiExplicitImageTool,
EdenAiObjectDetectionTool,
EdenAiParsingIDTool,
EdenAiParsingInvoiceTool,
EdenAiSpeechToTextTool,
EdenAiTextModerationTool,
EdenAiTextToSpeechTool,
)
from langchain.agents import AgentType, initialize_agent
from langchain_community.llms import EdenAI
llm = EdenAI(
feature="text", provider="openai", params={"temperature": 0.2, "max_tokens": 250}
)
tools = [
EdenAiTextModerationTool(providers=["openai"], language="en"),
EdenAiObjectDetectionTool(providers=["google", "api4ai"]),
| EdenAiTextToSpeechTool(providers=["amazon"], language="en", voice="MALE") | langchain_community.tools.edenai.EdenAiTextToSpeechTool |
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = | WikipediaQueryRun(api_wrapper=api_wrapper) | langchain_community.tools.WikipediaQueryRun |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="john.doe@email.com", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub')
import os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"
repo_id = "google/flan-t5-xxl"
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub
template = """{question}"""
prompt = PromptTemplate.from_template(template)
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)
moderation_config_1 = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)
chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{
"question": """What is John Doe's address, phone number and SSN from the following text?
John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
"""
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name
region = "<REGION>" # replace with your SageMaker Endpoint region
import json
from langchain.prompts import PromptTemplate
from langchain_community.llms import SagemakerEndpoint
from langchain_community.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer.
Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
Question: {question}
Answer:
"""
llm_prompt = PromptTemplate.from_template(template)
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.95,
"max_length": 200,
"num_return_sequences": 3,
"top_k": 50,
"top_p": 0.95,
"do_sample": True,
},
content_handler=content_handler,
)
pii_config = | ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") | langchain_experimental.comprehend_moderation.ModerationPiiConfig |
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
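# Illustrative continuation (assumption): wrap the base embeddings in a
# HypotheticalDocumentEmbedder so an LLM-generated hypothetical answer is embedded
# instead of the raw query.
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")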
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = | ChatAnthropicMessages(model_name="claude-instant-1.2") | langchain_anthropic.ChatAnthropicMessages |
get_ipython().system(' pip install langchain replicate')
from langchain_community.chat_models import ChatOllama
llama2_chat = ChatOllama(model="llama2:13b-chat")
llama2_code = ChatOllama(model="codellama:7b-instruct")
from langchain_community.llms import Replicate
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llama2_chat_replicate = Replicate(
model=replicate_id, input={"temperature": 0.01, "max_length": 500, "top_p": 1}
)
llm = llama2_chat
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_uri("sqlite:///nba_roster.db", sample_rows_in_table_info=0)
def get_schema(_):
return db.get_table_info()
def run_query(query):
return db.run(query)
from langchain_core.prompts import ChatPromptTemplate
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
[
("system", "Given an input question, convert it to a SQL query. No pre-amble."),
("human", template),
]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
sql_response = (
RunnablePassthrough.assign(schema=get_schema)
| prompt
| llm.bind(stop=["\nSQLResult:"])
| StrOutputParser()
)
sql_response.invoke({"question": "What team is Klay Thompson on?"})
template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}
Question: {question}
SQL Query: {query}
SQL Response: {response}"""
prompt_response = ChatPromptTemplate.from_messages(
[
(
"system",
"Given an input question and SQL response, convert it to a natural language answer. No pre-amble.",
),
("human", template),
]
)
full_chain = (
| RunnablePassthrough.assign(query=sql_response) | langchain_core.runnables.RunnablePassthrough.assign |
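# Illustrative completion (assumption): the full chain usually runs the generated SQL
# against the database and then asks the model for a natural-language answer.
full_chain = (
    RunnablePassthrough.assign(query=sql_response).assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
full_chain.invoke({"question": "What team is Klay Thompson on?"})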
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2')
import os
from langchain_community.llms import HuggingFaceTextGenInference
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
llm = HuggingFaceTextGenInference(
inference_server_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=50,
temperature=0.1,
repetition_penalty=1.03,
server_kwargs={
"headers": {
"Authorization": f"Bearer {HF_TOKEN}",
"Content-Type": "application/json",
}
},
)
from langchain_community.llms import HuggingFaceEndpoint
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
llm = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 50,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain_community.llms import HuggingFaceHub
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 30,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.huggingface import ChatHuggingFace
messages = [
| SystemMessage(content="You're a helpful assistant") | langchain.schema.SystemMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_community.chat_models import ChatAnthropic
from langchain_openai import ChatOpenAI
from unittest.mock import patch
import httpx
from openai import RateLimitError
request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
openai_llm = ChatOpenAI(max_retries=0)
anthropic_llm = ChatAnthropic()
llm = openai_llm.with_fallbacks([anthropic_llm])
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(openai_llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(chain.invoke({"animal": "kangaroo"}))
except RateLimitError:
print("Hit error")
from langchain_core.output_parsers import StrOutputParser
chat_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chat_model = ChatOpenAI(model_name="gpt-fake")
bad_chain = chat_prompt | chat_model | StrOutputParser()
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
prompt_template = """Instructions: You should always include a compliment in your response.
Question: Why did the {animal} cross the road?"""
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
good_chain = prompt | llm
chain = bad_chain.with_fallbacks([good_chain])
chain.invoke({"animal": "turtle"})
short_llm = ChatOpenAI()
long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
llm = short_llm.with_fallbacks([long_llm])
inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000)
try:
print(short_llm.invoke(inputs))
except Exception as e:
print(e)
try:
print(llm.invoke(inputs))
except Exception as e:
print(e)
from langchain.output_parsers import DatetimeOutputParser
prompt = ChatPromptTemplate.from_template(
"what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)"
)
openai_35 = ChatOpenAI() | | DatetimeOutputParser() | langchain.output_parsers.DatetimeOutputParser |
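# Illustrative continuation (assumption): give the parser-bound gpt-3.5 model a gpt-4
# fallback and compare the two chains on the same input.
openai_4 = ChatOpenAI(model="gpt-4") | DatetimeOutputParser()
only_35 = prompt | openai_35
fallback_4 = prompt | openai_35.with_fallbacks([openai_4])
try:
    print(only_35.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")
try:
    print(fallback_4.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")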
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@tool("search-tool", args_schema=SearchInput, return_direct=True)
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
print(search.return_direct)
from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = Field(description="second number")
class CustomSearchTool(BaseTool):
name = "custom_search"
description = "useful for when you need to answer questions about current events"
args_schema: Type[BaseModel] = SearchInput
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return "LangChain"
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
class CustomCalculatorTool(BaseTool):
name = "Calculator"
description = "useful for when you need to answer questions about math"
args_schema: Type[BaseModel] = CalculatorInput
return_direct: bool = True
def _run(
self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return a * b
async def _arun(
self,
a: int,
b: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
search = CustomSearchTool()
print(search.name)
print(search.description)
print(search.args)
multiply = CustomCalculatorTool()
print(multiply.name)
print(multiply.description)
print(multiply.args)
print(multiply.return_direct)
def search_function(query: str):
return "LangChain"
search = StructuredTool.from_function(
func=search_function,
name="Search",
description="useful for when you need to answer questions about current events",
)
print(search.name)
print(search.description)
print(search.args)
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = | Field(description="second number") | langchain.pydantic_v1.Field |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
tools = toolkit.get_tools()
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
print("Available tools:")
for tool in tools:
print("\t" + tool.name)
agent.run(
"You have the software engineering capabilities of a Google Principle engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them."
)
from langchain import hub
gh_issue_prompt_template = hub.pull("kastanday/new-github-issue")
print(gh_issue_prompt_template.template)
def format_issue(issue):
title = f"Title: {issue.get('title')}."
opened_by = f"Opened by user: {issue.get('opened_by')}"
body = f"Body: {issue.get('body')}"
comments = issue.get("comments") # often too long
return "\n".join([title, opened_by, body])
issue = github.get_issue(33) # task to implement a RNA-seq pipeline (bioinformatics)
final_gh_issue_prompt = gh_issue_prompt_template.format(
issue_description=format_issue(issue)
)
print(final_gh_issue_prompt)
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from langchain_core.prompts.chat import MessagesPlaceholder
summarizer_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # type: ignore
chat_history = MessagesPlaceholder(variable_name="chat_history")
memory = ConversationSummaryBufferMemory(
memory_key="chat_history",
return_messages=True,
llm=summarizer_llm,
max_token_limit=2_000,
)
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True, # or pass a function that accepts the error and returns a string
max_iterations=30,
max_execution_time=None,
early_stopping_method="generate",
memory=memory,
agent_kwargs={
"memory_prompts": [chat_history],
"input_variables": ["input", "agent_scratchpad", "chat_history"],
"prefix": final_gh_issue_prompt,
},
)
from langchain_core.tracers.context import tracing_v2_enabled
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "ls__......"
os.environ["LANGCHAIN_PROJECT"] = "Github_Demo_PR"
os.environ["LANGCHAIN_WANDB_TRACING"] = "false"
with tracing_v2_enabled(project_name="Github_Demo_PR", tags=["PR_bot"]) as cb:
agent.run(final_gh_issue_prompt)
from langchain.tools.render import render_text_description_and_args
print( | render_text_description_and_args(tools) | langchain.tools.render.render_text_description_and_args |
from langchain.memory import ConversationSummaryBufferMemory
from langchain_openai import OpenAI
llm = OpenAI()
memory = | ConversationSummaryBufferMemory(llm=llm, max_token_limit=10) | langchain.memory.ConversationSummaryBufferMemory |
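# Illustrative continuation (assumption): store a short exchange and read back the
# summarised buffer.
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})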
from typing import List
from langchain.output_parsers import YamlOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = | Field(description="question to set up a joke") | langchain_core.pydantic_v1.Field |
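# Illustrative continuation (assumption): a complete Joke model plus a chain that parses
# the model output as YAML.
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

parser = YamlOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})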
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import SpacyTextSplitter
text_splitter = SpacyTextSplitter(chunk_size=1000)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import SentenceTransformersTokenTextSplitter
splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)
text = "Lorem "
count_start_and_stop_tokens = 2
text_token_count = splitter.count_tokens(text=text) - count_start_and_stop_tokens
print(text_token_count)
token_multiplier = splitter.maximum_tokens_per_chunk // text_token_count + 1
text_to_split = text * token_multiplier
print(f"tokens in text to split: {splitter.count_tokens(text=text_to_split)}")
text_chunks = splitter.split_text(text=text_to_split)
print(text_chunks[1])
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import NLTKTextSplitter
text_splitter = | NLTKTextSplitter(chunk_size=1000) | langchain_text_splitters.NLTKTextSplitter |
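# Illustrative continuation (assumption): split on sentence boundaries with NLTK and
# inspect the first chunk.
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])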
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@ | tool("search-tool", args_schema=SearchInput, return_direct=True) | langchain.tools.tool |
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile
import requests
logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader
loader = | PyPDFLoader("./cj/cj.pdf") | langchain_community.document_loaders.PyPDFLoader |
get_ipython().system("python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken")
import getpass
import os
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("Activeloop Token:")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
embeddings = OpenAIEmbeddings(disallowed_special=())
get_ipython().system('git clone https://github.com/twitter/the-algorithm # replace with any repository of your choice')
import os
from langchain_community.document_loaders import TextLoader
root_dir = "./the-algorithm"
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for file in filenames:
try:
loader = TextLoader(os.path.join(dirpath, file), encoding="utf-8")
docs.extend(loader.load_and_split())
except Exception:
pass
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(docs)
username = "<USERNAME_OR_ORG>" # replace with your username from app.activeloop.ai
db = DeepLake(
dataset_path=f"hub://{username}/twitter-algorithm",
embedding=embeddings,
)
db.add_documents(texts)
db = DeepLake(
dataset_path=f"hub://{username}/twitter-algorithm",
read_only=True,
embedding=embeddings,
)
retriever = db.as_retriever()
retriever.search_kwargs["distance_metric"] = "cos"
retriever.search_kwargs["fetch_k"] = 100
retriever.search_kwargs["maximal_marginal_relevance"] = True
retriever.search_kwargs["k"] = 10
def filter(x):
if "com.google" in x["text"].data()["value"]:
return False
metadata = x["metadata"].data()["value"]
return "scala" in metadata["source"] or "py" in metadata["source"]
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo-0613") # switch to 'gpt-4'
qa = | ConversationalRetrievalChain.from_llm(model, retriever=retriever) | langchain.chains.ConversationalRetrievalChain.from_llm |
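# Illustrative continuation (assumption): ask a few questions about the codebase while
# carrying the chat history between turns.
questions = [
    "What does favCountParams do?",
    "What are the major negative modifiers that lower your linear ranking parameters?",
]
chat_history = []
for question in questions:
    result = qa({"question": question, "chat_history": chat_history})
    chat_history.append((question, result["answer"]))
    print(f"-> **Question**: {question}\n")
    print(f"**Answer**: {result['answer']}\n")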
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redis_url = "redis://localhost:6379"
redis_url = "redis://:secret@redis:7379/2"
redis_url = "redis://joe:secret@redis/0"
redis_url = "redis+sentinel://localhost:26379"
redis_url = "redis+sentinel://joe:secret@redis"
redis_url = "redis+sentinel://redis:26379/zone-1/2"
redis_url = "rediss://localhost:6379"
redis_url = "rediss+sentinel://localhost"
metadata = [
{
"user": "john",
"age": 18,
"job": "engineer",
"credit_score": "high",
},
{
"user": "derrick",
"age": 45,
"job": "doctor",
"credit_score": "low",
},
{
"user": "nancy",
"age": 94,
"job": "doctor",
"credit_score": "high",
},
{
"user": "tyler",
"age": 100,
"job": "engineer",
"credit_score": "high",
},
{
"user": "joe",
"age": 35,
"job": "dentist",
"credit_score": "medium",
},
]
texts = ["foo", "foo", "foo", "bar", "bar"]
from langchain_community.vectorstores.redis import Redis
rds = Redis.from_texts(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users",
)
rds.index_name
get_ipython().system('rvl index listall')
get_ipython().system('rvl index info -i users')
get_ipython().system('rvl stats -i users')
results = rds.similarity_search("foo")
print(results[0].page_content)
results = rds.similarity_search("foo", k=3)
meta = results[1].metadata
print("Key of the document in Redis: ", meta.pop("id"))
print("Metadata of the document: ", meta)
results = rds.similarity_search_with_score("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_score("foo", k=5, distance_threshold=0.1)
for result in results:
print(f"Content: {result[0].page_content} --- Score: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5)
for result in results:
print(f"Content: {result[0].page_content} --- Similiarity: {result[1]}")
results = rds.similarity_search_with_relevance_scores("foo", k=5, score_threshold=0.9)
for result in results:
print(f"Content: {result[0].page_content} --- Similarity: {result[1]}")
new_document = ["baz"]
new_metadata = [{"user": "sam", "age": 50, "job": "janitor", "credit_score": "high"}]
rds.add_texts(new_document, new_metadata)
results = rds.similarity_search("baz", k=3)
print(results[0].metadata)
results = rds.max_marginal_relevance_search("foo")
results = rds.max_marginal_relevance_search("foo", lambda_mult=0.1)
rds.write_schema("redis_schema.yaml")
new_rds = Redis.from_existing_index(
embeddings,
index_name="users",
redis_url="redis://localhost:6379",
schema="redis_schema.yaml",
)
results = new_rds.similarity_search("foo", k=3)
print(results[0].metadata)
new_rds.schema == rds.schema
index_schema = {
"tag": [{"name": "credit_score"}],
"text": [{"name": "user"}, {"name": "job"}],
"numeric": [{"name": "age"}],
}
rds, keys = Redis.from_texts_return_keys(
texts,
embeddings,
metadatas=metadata,
redis_url="redis://localhost:6379",
index_name="users_modified",
index_schema=index_schema, # pass in the new index schema
)
from langchain_community.vectorstores.redis import RedisText
is_engineer = RedisText("job") == "engineer"
results = rds.similarity_search("foo", k=3, filter=is_engineer)
print("Job:", results[0].metadata["job"])
print("Engineers in the dataset:", len(results))
starts_with_doc = RedisText("job") % "doc*"
results = rds.similarity_search("foo", k=3, filter=starts_with_doc)
for result in results:
print("Job:", result.metadata["job"])
print("Jobs in dataset that start with 'doc':", len(results))
from langchain_community.vectorstores.redis import RedisNum
is_over_18 = RedisNum("age") > 18
is_under_99 = RedisNum("age") < 99
age_range = is_over_18 & is_under_99
results = rds.similarity_search("foo", filter=age_range)
for result in results:
print("User:", result.metadata["user"], "is", result.metadata["age"])
age_range = (RedisNum("age") > 18) & (RedisNum("age") < 99)
results = rds.similarity_search("foo", filter=age_range)
for result in results:
print("User:", result.metadata["user"], "is", result.metadata["age"])
query = "foo"
results = rds.similarity_search_with_score(query, k=3, return_metadata=True)
for result in results:
print("Content:", result[0].page_content, " --- Score: ", result[1])
retriever = rds.as_retriever(search_type="similarity", search_kwargs={"k": 4})
docs = retriever.get_relevant_documents(query)
docs
retriever = rds.as_retriever(
search_type="similarity_distance_threshold",
search_kwargs={"k": 4, "distance_threshold": 0.1},
)
docs = retriever.get_relevant_documents(query)
docs
retriever = rds.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"score_threshold": 0.9, "k": 10},
)
retriever.get_relevant_documents("foo")
retriever = rds.as_retriever(
search_type="mmr", search_kwargs={"fetch_k": 20, "k": 4, "lambda_mult": 0.1}
)
retriever.get_relevant_documents("foo")
| Redis.delete(keys, redis_url="redis://localhost:6379") | langchain_community.vectorstores.redis.Redis.delete |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright beautifulsoup4')
get_ipython().system(' playwright install')
from langchain_community.document_loaders import AsyncChromiumLoader
urls = ["https://www.wsj.com"]
loader = | AsyncChromiumLoader(urls) | langchain_community.document_loaders.AsyncChromiumLoader |
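# Illustrative continuation (assumption): load the page HTML with the headless Chromium
# loader and convert it to plain text with the Html2Text document transformer.
from langchain_community.document_transformers import Html2TextTransformer

html = loader.load()
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(html)
print(docs_transformed[0].page_content[:500])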
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-community')
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.messages import AIMessage
from langchain_community.llms.chatglm3 import ChatGLM3
template = """{question}"""
prompt = PromptTemplate.from_template(template)
endpoint_url = "http://127.0.0.1:8000/v1/chat/completions"
messages = [
AIMessage(content="我将从美国到中国来旅游,出行前希望了解中国的城市"),
| AIMessage(content="欢迎问我任何问题。") | langchain.schema.messages.AIMessage |
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
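# The endpoint above proxies an external OpenAI gpt-4 chat model; the API key is
# resolved from the Databricks secret referenced by "secrets/<scope>/openai-api-key".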
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
embeddings.embed_query("hello")[:3]
from langchain_community.llms import Databricks
llm = Databricks(endpoint_name="dolly")
llm("How are you?")
llm("How are you?", stop=["."])
import os
import dbutils
os.environ["DATABRICKS_TOKEN"] = dbutils.secrets.get("myworkspace", "api_token")
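# dbutils is only available inside a Databricks notebook/runtime; outside Databricks,
# DATABRICKS_TOKEN has to be provided some other way (e.g. an environment variable).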
llm = | Databricks(host="myworkspace.cloud.databricks.com", endpoint_name="dolly") | langchain_community.llms.Databricks |
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain_community.tools.tavily_search import TavilySearchResults
tool = TavilySearchResults()
tool.invoke({"query": "What happened in the latest burning man floods"})
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an assistant."""
base_prompt = hub.pull("langchain-ai/openai-functions-template")
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
tavily_tool = | TavilySearchResults() | langchain_community.tools.tavily_search.TavilySearchResults |
import os
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
os.environ["WANDB_PROJECT"] = "langchain-tracing"
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import wandb_tracing_enabled
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?") # this should be traced
if "LANGCHAIN_WANDB_TRACING" in os.environ:
del os.environ["LANGCHAIN_WANDB_TRACING"]
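# Deleting the env var turns off global W&B tracing; the context manager below
# re-enables tracing just for the calls made inside the "with" block.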
with | wandb_tracing_enabled() | langchain.callbacks.wandb_tracing_enabled |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dingodb')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet git+https://git@github.com/dingodb/pydingo.git')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Dingo
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from dingodb import DingoDB
index_name = "langchain_demo"
dingo_client = DingoDB(user="", password="", host=["127.0.0.1:13000"])
if (
index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(
index_name=index_name, dimension=1536, metric_type="cosine", auto_id=False
)
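# dimension=1536 matches the size of OpenAI's text-embedding-ada-002 vectors;
# the index is only created if it does not already exist.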
docsearch = Dingo.from_documents(
docs, embeddings, client=dingo_client, index_name=index_name
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Dingo
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
vectorstore = | Dingo(embeddings, "text", client=dingo_client, index_name=index_name) | langchain_community.vectorstores.Dingo |
from langchain.output_parsers import DatetimeOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
output_parser = | DatetimeOutputParser() | langchain.output_parsers.DatetimeOutputParser |
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options= | TextContentsOptions(max_length=200) | langchain_exa.TextContentsOptions |
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
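# HyDE: the LLM first writes a hypothetical answer using the built-in "web_search"
# prompt, and that generated document is what gets embedded instead of the raw query.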
embeddings = | HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search") | langchain.chains.HypotheticalDocumentEmbedder.from_llm |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
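# The ConfigurableField id ("llm_temperature") is the key used below with
# .with_config(configurable={...}) to override the temperature for a single call.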
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai=ChatOpenAI(),
gpt4=ChatOpenAI(model="gpt-4"),
)
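# "anthropic" is the default alternative; passing configurable={"llm": "openai"} or
# {"llm": "gpt4"} at runtime swaps in the corresponding model without rebuilding the chain.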
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm
chain.invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})
chain.with_config(configurable={"llm": "anthropic"}).invoke({"topic": "bears"})
llm = | ChatAnthropic(temperature=0) | langchain_community.chat_models.ChatAnthropic |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai')
from langchain.utils.math import cosine_similarity
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{query}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{query}"""
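# Semantic routing: the two templates and the incoming question are embedded, and
# cosine_similarity is used to send the question to whichever prompt is closer.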
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
ticker = "AAPL"
last_quote_tool = | PolygonLastQuote(api_wrapper=api_wrapper) | langchain_community.tools.polygon.last_quote.PolygonLastQuote |
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = | load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness") | langchain.evaluation.load_evaluator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = OpenAI(temperature=0)
agent = create_react_agent(llm, tools, prompt)
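# The ReAct prompt pulled from the hub drives a thought/action loop in which the
# agent can call the Wikipedia tool before producing its final answer.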
agent_executor = | AgentExecutor(agent=agent, tools=tools, verbose=True) | langchain.agents.AgentExecutor |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-memorystore-redis')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
import redis
from langchain_google_memorystore_redis import (
DistanceStrategy,
HNSWConfig,
RedisVectorStore,
)
redis_client = redis.from_url("redis://127.0.0.1:6379")
index_config = HNSWConfig(
name="my_vector_index", distance_strategy=DistanceStrategy.COSINE, vector_size=128
)
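# HNSW index over 128-dimensional vectors with cosine distance; the vector size
# matches FakeEmbeddings(size=128) used further down.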
RedisVectorStore.init_index(client=redis_client, index_config=index_config)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("./state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.fake import FakeEmbeddings
embeddings = FakeEmbeddings(size=128)
redis_client = redis.from_url("redis://127.0.0.1:6379")
rvs = RedisVectorStore.from_documents(
docs, embedding=embeddings, client=redis_client, index_name="my_vector_index"
)
rvs = RedisVectorStore(
client=redis_client, index_name="my_vector_index", embeddings=embeddings
)
ids = rvs.add_texts(
texts=[d.page_content for d in docs], metadatas=[d.metadata for d in docs]
)
import pprint
query = "What did the president say about Ketanji Brown Jackson"
knn_results = rvs.similarity_search(query=query)
pprint.pprint(knn_results)
rq_results = rvs.similarity_search_with_score(query=query, distance_threshold=0.8)
pprint.pprint(rq_results)
mmr_results = rvs.max_marginal_relevance_search(query=query, lambda_mult=0.90)
pprint.pprint(mmr_results)
retriever = rvs.as_retriever()
results = retriever.invoke(query)
pprint.pprint(results)
rvs.delete(ids)
| RedisVectorStore.drop_index(client=redis_client, index_name="my_vector_index") | langchain_google_memorystore_redis.RedisVectorStore.drop_index |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
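# stream() (and astream() below) yield partial chunks as they are generated;
# printing with end="|" makes the chunk boundaries visible.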
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser()
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
print(txt, end="")
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="nemotron_steerlm_8b")
complex_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0}
)
print("Un-creative\n")
print(complex_result.content)
print("\n\nCreative\n")
creative_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9}
)
print(creative_result.content)
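# SteerLM label values (0 vs 9 here) steer the same model toward more or less
# creative and verbose output without changing the prompt itself.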
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = (
prompt
| ChatNVIDIA(model="nemotron_steerlm_8b").bind(
labels={"creativity": 9, "complexity": 0, "verbosity": 9}
)
| StrOutputParser()
)
for txt in chain.stream({"input": "Why is a PB&J?"}):
print(txt, end="")
import IPython
import requests
image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="playground_neva_22b")
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
],
labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0},
)
import IPython
import requests
image_url = "https://picsum.photos/seed/kitten/300/200"
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
import base64
from langchain_core.messages import HumanMessage
b64_string = base64.b64encode(image_content).decode("utf-8")
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{b64_string}"},
},
]
)
]
)
base64_with_mime_type = f"data:image/png;base64,{b64_string}"
llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />')
from langchain_nvidia_ai_endpoints import ChatNVIDIA
kosmos = ChatNVIDIA(model="kosmos_2")
from langchain_core.messages import HumanMessage
def drop_streaming_key(d):
"""Takes in payload dictionary, outputs new payload dictionary"""
if "stream" in d:
d.pop("stream")
return d
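# Helper meant to post-process the request payload so the "stream" flag is removed
# before it reaches an endpoint that does not accept streaming requests.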
kosmos = | ChatNVIDIA(model="kosmos_2") | langchain_nvidia_ai_endpoints.ChatNVIDIA |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
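# Scenario 1: a single LLMChain. The SageMakerCallbackHandler attached to both the
# LLM and the chain logs prompts, generations and chain metadata to the Experiments run.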
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE)
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[sagemaker_callback])
chain.run(**INPUT_VARIABLES)
sagemaker_callback.flush_tracker()
RUN_NAME = "run-scenario-2"
PROMPT_TEMPLATE_1 = """You are a playwright. Given the title of a play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
PROMPT_TEMPLATE_2 = """You are a play critic from the New York Times. Given the synopsis of a play, it is your job to write a review for that play.
Play Synopsis: {synopsis}
Review from a New York Times play critic of the above play:"""
INPUT_VARIABLES = {
"input": "documentary about good video games that push the boundary of game design"
}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
prompt_template1 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_1)
prompt_template2 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_2)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
chain1 = LLMChain(llm=llm, prompt=prompt_template1, callbacks=[sagemaker_callback])
chain2 = LLMChain(llm=llm, prompt=prompt_template2, callbacks=[sagemaker_callback])
overall_chain = SimpleSequentialChain(
chains=[chain1, chain2], callbacks=[sagemaker_callback]
)
overall_chain.run(**INPUT_VARIABLES)
sagemaker_callback.flush_tracker()
RUN_NAME = "run-scenario-3"
PROMPT_TEMPLATE = "Who is the oldest person alive? And what is their current age raised to the power of 1.51?"
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = | SageMakerCallbackHandler(run) | langchain.callbacks.SageMakerCallbackHandler |
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
path = "/Users/rlm/Desktop/Papers/LLaVA/"
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
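# partition_pdf extracts embedded images and table structure, then chunks the text
# by title sections: new chunks start after ~3800 characters (capped at 4000), and
# sections under 2000 characters are combined with their neighbors.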
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOllama(model="llama2:13b-chat")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements if i.text != ""]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
cleaned_img_summary = [
s.split("clip_model_load: total allocated memory: 201.27 MB\n\n", 1)[1].strip()
for s in img_summaries
]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
vectorstore = Chroma(
collection_name="summaries", embedding_function=GPT4AllEmbeddings()
)
store = InMemoryStore()  # docstore holding the raw texts, tables and image summaries
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
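# Multi-vector pattern: the vector store indexes the summaries for search, while the
# docstore holds the documents actually returned, keyed by the shared doc_id.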
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(
list(zip(img_ids, cleaned_img_summary))
) # Store the image summary as the raw document
retriever.get_relevant_documents("Images / figures with playful and creative examples")[
0
]
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = | ChatOllama(model="llama2:13b-chat") | langchain_community.chat_models.ChatOllama |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3')
from langchain_community.document_loaders import S3DirectoryLoader
loader = | S3DirectoryLoader("testing-hwc") | langchain_community.document_loaders.S3DirectoryLoader |