get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
os.environ["OUTLINE_API_KEY"] = "xxx"
os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
from langchain.retrievers import OutlineRetriever
retriever = OutlineRetriever()
retriever.get_relevant_documents(query="LangChain", doc_content_chars_max=100)
import os
from getpass import getpass
os.environ["OPENAI_API_KEY"] = getpass("OpenAI API Key:")
from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo")
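# Hedged sketch (not in the source notebook): wiring the Outline retriever and the chat
# model into a ConversationalRetrievalChain; the question text is illustrative.
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
qa({"question": "What is LangChain?", "chat_history": []})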
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiledb-vector-search')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import TileDB
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
embeddings = HuggingFaceEmbeddings()
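# Hedged sketch (not in the source): indexing the split documents into TileDB and running a
# similarity search; the index_uri path and index_type value are assumptions.
db = TileDB.from_documents(
    documents, embeddings, index_uri="/tmp/tiledb_index", index_type="FLAT"
)
db.similarity_search("What did the president say about Ketanji Brown Jackson?")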
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
split_docs = text_splitter.split_documents(documents)
update_vectordb = True
split_docs[-20]
print("# docs = %d, # splits = %d" % (len(documents), len(split_docs)))
def gen_starrocks(update_vectordb, embeddings, settings):
if update_vectordb:
docsearch = StarRocks.from_documents(split_docs, embeddings, config=settings)
else:
        docsearch = StarRocks(embeddings, settings)
    return docsearch
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "unstructured[all-docs]"')
from langchain_community.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader("./example_data/state_of_the_union.txt")
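# Hedged sketch (not in the source): loading the file and inspecting the first document.
docs = loader.load()
print(docs[0].page_content[:400])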
from langchain_experimental.llm_bash.base import LLMBashChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain = LLMBashChain.from_llm(llm, verbose=True)
bash_chain.run(text)
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.llm_bash.prompt import BashOutputParser
_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:
Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"
I need to take the following actions:
- List all files in the directory
- Create a new directory
- Copy the files from the first directory into the second directory
```bash
ls
mkdir myNewDirectory
cp -r target/* myNewDirectory
```
Do not use 'echo' when writing the script.
That is the format. Begin!
Question: {question}"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
output_parser=BashOutputParser(),
)
bash_chain = LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain.run(text)
from langchain_experimental.llm_bash.bash import BashProcess
persistent_process = BashProcess(persistent=True)
bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)
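# Hedged sketch (not in the source): reusing the persistent bash process across calls;
# the command text is illustrative.
bash_chain.run("List the current directory, then change into the /tmp directory.")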
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_community.llms import TitanTakeoffPro
llm = TitanTakeoffPro()
output = llm("What is the weather in London in August?")
print(output)
llm = TitanTakeoffPro(
base_url="http://localhost:3000",
min_new_tokens=128,
max_new_tokens=512,
no_repeat_ngram_size=2,
sampling_topk=1,
sampling_topp=1.0,
sampling_temperature=1.0,
repetition_penalty=1.0,
regex_string="",
)
output = llm("What is the largest rainforest in the world?")
print(output)
llm = TitanTakeoffPro()
rich_output = llm.generate(["What is Deep Learning?", "What is Machine Learning?"])
print(rich_output.generations)
llm = TitanTakeoffPro(
streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
prompt = "What is the capital of France?"
llm(prompt)
llm = TitanTakeoffPro()
prompt = PromptTemplate.from_template("Tell me about {topic}")
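# Hedged sketch (not in the source): composing the prompt with the Takeoff LLM via LCEL;
# the topic value is illustrative.
chain = prompt | llm
chain.invoke({"topic": "the University of Cambridge"})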
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0)
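# Hedged sketch (not in the source): registering a runtime-configurable alternative so the
# Anthropic model can be swapped for OpenAI; the field id and keys are assumptions.
configurable_llm = llm.configurable_alternatives(
    ConfigurableField(id="llm"),
    default_key="anthropic",
    openai=ChatOpenAI(),
)
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | configurable_llm
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})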
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml langchainhub')
get_ipython().system(' brew install tesseract')
get_ipython().system(' brew install poppler')
path = "/Users/rlm/Desktop/Papers/LLaMA2/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaMA2.pdf",
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
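# Hedged sketch (not in the source): building the summarization chain from the prompt and
# batching it over the extracted table elements, mirroring the pattern used later in this
# document; the model choice and concurrency value are illustrative.
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
table_summaries = summarize_chain.batch(
    [e.text for e in table_elements], {"max_concurrency": 5}
)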
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
from typing import Any
from langchain_core.runnables import RunnableConfig
def try_except_tool(tool_args: dict, config: RunnableConfig) -> Any:
    try:
        return complex_tool.invoke(tool_args, config=config)
    except Exception as e:
        return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}"
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| try_except_tool
)
print(
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
)
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
better_model = ChatOpenAI(model="gpt-4-1106-preview", temperature=0).bind_tools(
[complex_tool], tool_choice="complex_tool"
)
better_chain = (
better_model
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
    | complex_tool
)
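# Hedged sketch (not in the source): combining the two chains with a fallback so the
# cheaper model is tried first; with_fallbacks() is the standard Runnable method.
chain_with_fallback = chain.with_fallbacks([better_chain])
chain_with_fallback.invoke(
    "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)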
get_ipython().run_line_magic('pip', 'install "pgvecto_rs[sdk]"')
from typing import List
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits_pypdf = text_splitter.split_documents(pdf_pages)
all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "cpi.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
baseline = Chroma.from_texts(
texts=all_splits_pypdf_texts,
collection_name="baseline",
embedding=OpenAIEmbeddings(),
)
retriever_baseline = baseline.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import base64
import io
import os
from io import BytesIO
from langchain_core.messages import HumanMessage
from PIL import Image
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Image summary"""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
import uuid
from base64 import b64decode
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_core.documents import Document
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
multi_vector_img = Chroma(
collection_name="multi_vector_img", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img = create_multi_vector_retriever(
multi_vector_img,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?"
suffix_for_images = " Include any pie charts, graphs, or tables."
docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)
from IPython.display import HTML, display
def plt_img_base64(img_base64):
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
plt_img_base64(docs[1])
multi_vector_text = Chroma(
collection_name="multi_vector_text", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img_summary = create_multi_vector_retriever(
multi_vector_text,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
image_summaries,
)
from langchain_experimental.open_clip import OpenCLIPEmbeddings
multimodal_embd = Chroma(
collection_name="multimodal_embd", embedding_function=OpenCLIPEmbeddings()
)
image_uris = sorted(
[
os.path.join(path, image_name)
for image_name in os.listdir(path)
if image_name.endswith(".jpg")
]
)
if image_uris:
multimodal_embd.add_images(uris=image_uris)
if texts:
multimodal_embd.add_texts(texts=texts)
if tables:
multimodal_embd.add_texts(texts=tables)
retriever_multimodal_embd = multimodal_embd.as_retriever()
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
rag_prompt_text = ChatPromptTemplate.from_template(template)
def text_rag_chain(retriever):
"""RAG chain"""
model = ChatOpenAI(temperature=0, model="gpt-4")
chain = (
{"context": retriever, "question": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
memory = ConversationBufferMemory(memory_key="chat_history")
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
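# Hedged sketch (not in the source): indexing the chunks into Chroma and retrieving;
# the collection name and query are illustrative.
db = Chroma.from_documents(texts, embeddings, collection_name="state_of_the_union")
retriever = db.as_retriever()
retriever.get_relevant_documents("What did the president say about Ketanji Brown Jackson?")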
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-bigquery')
from langchain_community.document_loaders import BigQueryLoader
BASE_QUERY = """
SELECT
id,
dna_sequence,
organism
FROM (
SELECT
ARRAY (
SELECT
AS STRUCT 1 AS id, "ATTCGA" AS dna_sequence, "Lokiarchaeum sp. (strain GC14_75)." AS organism
UNION ALL
SELECT
AS STRUCT 2 AS id, "AGGCGA" AS dna_sequence, "Heimdallarchaeota archaeon (strain LC_2)." AS organism
UNION ALL
SELECT
AS STRUCT 3 AS id, "TCCGGA" AS dna_sequence, "Acidianus hospitalis (strain W1)." AS organism) AS new_array),
UNNEST(new_array)
"""
loader = BigQueryLoader(BASE_QUERY)
data = loader.load()
print(data)
loader = BigQueryLoader(
BASE_QUERY,
page_content_columns=["dna_sequence", "organism"],
metadata_columns=["id"],
)
data = loader.load()
print(data)
ALIASED_QUERY = """
SELECT
id,
dna_sequence,
organism,
id as source
FROM (
SELECT
ARRAY (
SELECT
AS STRUCT 1 AS id, "ATTCGA" AS dna_sequence, "Lokiarchaeum sp. (strain GC14_75)." AS organism
UNION ALL
SELECT
AS STRUCT 2 AS id, "AGGCGA" AS dna_sequence, "Heimdallarchaeota archaeon (strain LC_2)." AS organism
UNION ALL
SELECT
AS STRUCT 3 AS id, "TCCGGA" AS dna_sequence, "Acidianus hospitalis (strain W1)." AS organism) AS new_array),
UNNEST(new_array)
"""
loader = BigQueryLoader(ALIASED_QUERY, metadata_columns=["source"])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-spanner')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable spanner.googleapis.com')
INSTANCE = "my-instance" # @param {type: "string"}
DATABASE = "my-database" # @param {type: "string"}
TABLE_NAME = "vectors_search_data" # @param {type: "string"}
from langchain_google_spanner import SecondaryIndex, SpannerVectorStore, TableColumn
SpannerVectorStore.init_vector_store_table(
instance_id=INSTANCE,
database_id=DATABASE,
table_name=TABLE_NAME,
id_column="row_id",
metadata_columns=[
TableColumn(name="metadata", type="JSON", is_null=True),
| TableColumn(name="title", type="STRING(MAX)", is_null=False) | langchain_google_spanner.TableColumn |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-formrecognizer > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-cognitiveservices-speech > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-textanalytics > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azure-ai-vision > /dev/null')
import os
os.environ["OPENAI_API_KEY"] = "sk-"
os.environ["AZURE_COGS_KEY"] = ""
os.environ["AZURE_COGS_ENDPOINT"] = ""
os.environ["AZURE_COGS_REGION"] = ""
from langchain_community.agent_toolkits import AzureCognitiveServicesToolkit
toolkit = AzureCognitiveServicesToolkit()
[tool.name for tool in toolkit.get_tools()]
from langchain.agents import AgentType, initialize_agent
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
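# Hedged sketch (not in the source): handing the toolkit's tools to an agent; the agent
# type choice and question are assumptions.
agent = initialize_agent(
    tools=toolkit.get_tools(),
    llm=llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("What can I make with these ingredients: eggs, flour, and sugar?")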
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GooseAI
from getpass import getpass
GOOSEAI_API_KEY = getpass()
os.environ["GOOSEAI_API_KEY"] = GOOSEAI_API_KEY
llm = GooseAI()
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
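# Hedged sketch (not in the source): running the chain; the question is illustrative.
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)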
from langchain.chains import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_openai import ChatOpenAI
graph = Neo4jGraph(
url="bolt://localhost:7687", username="neo4j", password="pleaseletmein"
)
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
graph.refresh_schema()
print(graph.schema)
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True
)
result = chain("Who played in Top Gun?")
print(f"Intermediate steps: {result['intermediate_steps']}")
print(f"Final answer: {result['result']}")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True
)
chain.run("Who played in Top Gun?")
from langchain.prompts.prompt import PromptTemplate
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
Instructions:
Use only the provided relationship types and properties in the schema.
Do not use any other relationship types or properties that are not provided.
Schema:
{schema}
Note: Do not include any explanations or apologies in your responses.
Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.
Do not include any text except the generated Cypher statement.
Examples: Here are a few examples of generated Cypher statements for particular questions:
MATCH (m:Movie {{title:"Top Gun"}})<-[:ACTED_IN]-()
RETURN count(*) AS numberOfActors
The question is:
{question}"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0),
graph=graph,
verbose=True,
cypher_prompt=CYPHER_GENERATION_PROMPT,
)
chain.run("How many people played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
graph=graph,
cypher_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
    qa_llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k"),
    verbose=True,
)
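# Hedged sketch (not in the source): the question mirrors the earlier examples in this notebook.
chain.run("Who played in Top Gun?")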
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryStore()
retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
)
retriever.add_documents(docs, ids=None)
list(store.yield_keys())
sub_docs = vectorstore.similarity_search("justice breyer")
print(sub_docs[0].page_content)
retrieved_docs = retriever.get_relevant_documents("justice breyer")
len(retrieved_docs[0].page_content)
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
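# Hedged sketch (not in the source): rebuilding the retriever with both splitters so the
# parent chunks are themselves bounded; the collection name is illustrative.
vectorstore = Chroma(
    collection_name="split_parents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryStore()
retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
retriever.add_documents(docs)
retriever.get_relevant_documents("justice breyer")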
get_ipython().system('pip install pettingzoo pygame rlcard')
import collections
import inspect
import tenacity
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class GymnasiumAgent:
@classmethod
def get_docs(cls, env):
return env.unwrapped.__doc__
def __init__(self, model, env):
self.model = model
self.env = env
self.docs = self.get_docs(env)
self.instructions = """
Your goal is to maximize your return, i.e. the sum of the rewards you receive.
I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as:
Observation: <observation>
Reward: <reward>
Termination: <termination>
Truncation: <truncation>
Return: <sum_of_rewards>
You will respond with an action, formatted as:
Action: <action>
where you replace <action> with your actual action.
Do nothing else but return the action.
"""
self.action_parser = RegexParser(
regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
self.message_history = []
self.ret = 0
def random_action(self):
action = self.env.action_space.sample()
return action
def reset(self):
self.message_history = [
            SystemMessage(content=self.docs),
            SystemMessage(content=self.instructions),
        ]
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate
full_template = """{introduction}
{example}
{start}"""
full_prompt = PromptTemplate.from_template(full_template)
introduction_template = """You are impersonating {person}."""
introduction_prompt = PromptTemplate.from_template(introduction_template)
example_template = """Here's an example of an interaction:
Q: {example_q}
A: {example_a}"""
example_prompt = PromptTemplate.from_template(example_template)
start_template = """Now, do this for real!
Q: {input}
A:"""
start_prompt = PromptTemplate.from_template(start_template)
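# Hedged sketch (not in the source): composing the sub-prompts into a PipelinePromptTemplate;
# the example values passed to format() are illustrative.
input_prompts = [
    ("introduction", introduction_prompt),
    ("example", example_prompt),
    ("start", start_prompt),
]
pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_prompt, pipeline_prompts=input_prompts
)
print(
    pipeline_prompt.format(
        person="Elon Musk",
        example_q="What's your favorite car?",
        example_a="Tesla",
        input="What's your favorite social media site?",
    )
)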
get_ipython().run_line_magic('pip', 'install --upgrade --quiet apify-client')
from langchain_community.document_loaders import ApifyDatasetLoader
from langchain_community.document_loaders.base import Document
loader = ApifyDatasetLoader(
dataset_id="your-dataset-id",
dataset_mapping_function=lambda dataset_item: Document(
page_content=dataset_item["text"], metadata={"source": dataset_item["url"]}
),
)
data = loader.load()
from langchain.docstore.document import Document
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import ApifyDatasetLoader
loader = ApifyDatasetLoader(
dataset_id="your-dataset-id",
dataset_mapping_function=lambda item: Document(
page_content=item["text"] or "", metadata={"source": item["url"]}
),
)
index = VectorstoreIndexCreator()
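# Hedged sketch (not in the source): building the index from the loader and querying it;
# from_loaders() and query_with_sources() follow the classic VectorstoreIndexCreator API,
# and the query text is illustrative.
vectorstore_index = index.from_loaders([loader])
result = vectorstore_index.query_with_sources("What is Apify?")
print(result["answer"])
print(result["sources"])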
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2')
import os
from langchain_community.llms import HuggingFaceTextGenInference
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
llm = HuggingFaceTextGenInference(
inference_server_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=50,
temperature=0.1,
repetition_penalty=1.03,
server_kwargs={
"headers": {
"Authorization": f"Bearer {HF_TOKEN}",
"Content-Type": "application/json",
}
},
)
from langchain_community.llms import HuggingFaceEndpoint
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
llm = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 50,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain_community.llms import HuggingFaceHub
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 30,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.huggingface import ChatHuggingFace
messages = [
SystemMessage(content="You're a helpful assistant"),
HumanMessage(
content="What happens when an unstoppable force meets an immovable object?"
),
]
chat_model = ChatHuggingFace(llm=llm)
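# Hedged sketch (not in the source): the wrapped chat model applies the model's chat template
# before calling the underlying endpoint.
chat_model.invoke(messages)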
from typing import Callable, List
import tenacity
from langchain.output_parsers import RegexParser
from langchain.prompts import PromptTemplate
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class BiddingDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
bidding_template: PromptTemplate,
model: ChatOpenAI,
) -> None:
super().__init__(name, system_message, model)
self.bidding_template = bidding_template
def bid(self) -> str:
"""
Asks the chat model to output a bid to speak
"""
prompt = PromptTemplate(
input_variables=["message_history", "recent_message"],
template=self.bidding_template,
).format(
message_history="\n".join(self.message_history),
recent_message=self.message_history[-1],
)
bid_string = self.model([SystemMessage(content=prompt)]).content
return bid_string
character_names = ["Donald Trump", "Kanye West", "Elizabeth Warren"]
topic = "transcontinental high speed rail"
word_limit = 50
game_description = f"""Here is the topic for the presidential debate: {topic}.
The presidential candidates are: {', '.join(character_names)}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each presidential candidate."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_header(character_name, character_description):
return f"""{game_description}
Your name is {character_name}.
You are a presidential candidate.
Your description is as follows: {character_description}
You are debating the topic: {topic}.
Your goal is to be as creative as possible and make the voters think you are the best candidate.
"""
def generate_character_system_message(character_name, character_header):
return SystemMessage(
content=(
f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
Speak in the first person from the perspective of {character_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_headers = [
generate_character_header(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
character_system_messages = [
    generate_character_system_message(character_name, character_header)
    for character_name, character_header in zip(character_names, character_headers)
]
for (
character_name,
character_description,
character_header,
character_system_message,
) in zip(
character_names,
character_descriptions,
character_headers,
character_system_messages,
):
print(f"\n\n{character_name} Description:")
print(f"\n{character_description}")
print(f"\n{character_header}")
print(f"\n{character_system_message.content}")
class BidOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integer delimited by angled brackets, like this: <int>."
bid_parser = BidOutputParser(
regex=r"<(\d+)>", output_keys=["bid"], default_output_key="bid"
)
def generate_character_bidding_template(character_header):
bidding_template = f"""{character_header}
```
{{message_history}}
```
On the scale of 1 to 10, where 1 is not contradictory and 10 is extremely contradictory, rate how contradictory the following message is to your ideas.
```
{{recent_message}}
```
{bid_parser.get_format_instructions()}
Do nothing else.
"""
return bidding_template
character_bidding_templates = [
generate_character_bidding_template(character_header)
for character_header in character_headers
]
for character_name, bidding_template in zip(
character_names, character_bidding_templates
):
print(f"{character_name} Bidding Template:")
print(bidding_template)
topic_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(
content=f"""{game_description}
You are the debate moderator.
Please make the debate topic more specific.
Frame the debate topic as a problem to be solved.
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Speak directly to the presidential candidates: {*character_names,}.
Do not add anything else."""
),
]
specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content
print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")
@tenacity.retry(
stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(
f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
),
retry_error_callback=lambda retry_state: 0,
) # Default value when all retries are exhausted
def ask_for_bid(agent) -> str:
"""
    Ask the agent for a bid and parse it into the correct format.
"""
bid_string = agent.bid()
bid = int(bid_parser.parse(bid_string)["bid"])
return bid
import numpy as np
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
bids = []
for agent in agents:
bid = ask_for_bid(agent)
bids.append(bid)
max_value = np.max(bids)
max_indices = np.where(bids == max_value)[0]
idx = np.random.choice(max_indices)
print("Bids:")
for i, (bid, agent) in enumerate(zip(bids, agents)):
print(f"\t{agent.name} bid: {bid}")
if i == idx:
selected_name = agent.name
print(f"Selected: {selected_name}")
print("\n")
return idx
characters = []
for character_name, character_system_message, bidding_template in zip(
character_names, character_system_messages, character_bidding_templates
):
characters.append(
BiddingDialogueAgent(
name=character_name,
system_message=character_system_message,
            model=ChatOpenAI(temperature=0.2),
            bidding_template=bidding_template,
        )
    )
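# Hedged sketch (not in the source): running the debate with the simulator and bid-based
# speaker selection defined above; the moderator name and iteration count are illustrative.
max_iters = 10
simulator = DialogueSimulator(agents=characters, selection_function=select_next_speaker)
simulator.reset()
simulator.inject("Debate Moderator", specified_topic)
for _ in range(max_iters):
    name, message = simulator.step()
    print(f"({name}): {message}\n")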
import configparser
config = configparser.ConfigParser()
config.read("./secrets.ini")
openai_api_key = config["OPENAI"]["OPENAI_API_KEY"]
import os
os.environ.update({"OPENAI_API_KEY": openai_api_key})
wikidata_user_agent_header = (
None
if not config.has_section("WIKIDATA")
else config["WIKIDATA"]["WIKIDATA_USER_AGENT_HEADER"]
)
def get_nested_value(o: dict, path: list) -> any:
current = o
for key in path:
try:
current = current[key]
        except (KeyError, IndexError, TypeError):
return None
return current
from typing import Optional
import requests
def vocab_lookup(
search: str,
entity_type: str = "item",
url: str = "https://www.wikidata.org/w/api.php",
user_agent_header: str = wikidata_user_agent_header,
srqiprofile: str = None,
) -> Optional[str]:
headers = {"Accept": "application/json"}
if wikidata_user_agent_header is not None:
headers["User-Agent"] = wikidata_user_agent_header
if entity_type == "item":
srnamespace = 0
srqiprofile = "classic_noboostlinks" if srqiprofile is None else srqiprofile
elif entity_type == "property":
srnamespace = 120
srqiprofile = "classic" if srqiprofile is None else srqiprofile
else:
raise ValueError("entity_type must be either 'property' or 'item'")
params = {
"action": "query",
"list": "search",
"srsearch": search,
"srnamespace": srnamespace,
"srlimit": 1,
"srqiprofile": srqiprofile,
"srwhat": "text",
"format": "json",
}
response = requests.get(url, headers=headers, params=params)
if response.status_code == 200:
title = get_nested_value(response.json(), ["query", "search", 0, "title"])
if title is None:
return f"I couldn't find any {entity_type} for '{search}'. Please rephrase your request and try again"
return title.split(":")[-1]
else:
return "Sorry, I got an error. Please try again."
print(vocab_lookup("Malin 1"))
print(vocab_lookup("instance of", entity_type="property"))
print(vocab_lookup("Ceci n'est pas un q-item"))
import json
from typing import Any, Dict, List
import requests
def run_sparql(
query: str,
url="https://query.wikidata.org/sparql",
user_agent_header: str = wikidata_user_agent_header,
) -> List[Dict[str, Any]]:
headers = {"Accept": "application/json"}
if wikidata_user_agent_header is not None:
headers["User-Agent"] = wikidata_user_agent_header
response = requests.get(
url, headers=headers, params={"query": query, "format": "json"}
)
if response.status_code != 200:
return "That query failed. Perhaps you could try a different one?"
results = get_nested_value(response.json(), ["results", "bindings"])
return json.dumps(results)
run_sparql("SELECT (COUNT(?children) as ?count) WHERE { wd:Q1339 wdt:P40 ?children . }")
import re
from typing import List, Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
Tool,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_core.agents import AgentAction, AgentFinish
tools = [
Tool(
name="ItemLookup",
func=(lambda x: vocab_lookup(x, entity_type="item")),
description="useful for when you need to know the q-number for an item",
),
Tool(
name="PropertyLookup",
func=(lambda x: vocab_lookup(x, entity_type="property")),
description="useful for when you need to know the p-number for a property",
),
Tool(
name="SparqlQueryRunner",
func=run_sparql,
description="useful for getting results from a wikibase",
),
]
template = """
Answer the following questions by running a sparql query against a wikibase where the p and q items are
completely unknown to you. You will need to discover the p and q items before you can generate the sparql.
Do not assume you know the p and q items for any concepts. Always use tools to find all p and q items.
After you generate the sparql, you should run it. The results will be returned in json.
Summarize the json results in natural language.
You may assume the following prefixes:
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX p: <http://www.wikidata.org/prop/>
PREFIX ps: <http://www.wikidata.org/prop/statement/>
When generating sparql:
* Try to avoid "count" and "filter" queries if possible
* Never enclose the sparql in back-quotes
You have access to the following tools:
{tools}
Use the following format:
Question: the input question for which you must provide a natural language answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Question: {input}
{agent_scratchpad}"""
class CustomPromptTemplate(StringPromptTemplate):
template: str
tools: List[Tool]
def format(self, **kwargs) -> str:
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
kwargs["agent_scratchpad"] = thoughts
kwargs["tools"] = "\n".join(
[f"{tool.name}: {tool.description}" for tool in self.tools]
)
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools=tools,
input_variables=["input", "intermediate_steps"],
)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
output_parser = CustomOutputParser()
from langchain_openai import ChatOpenAI
llm = | ChatOpenAI(model_name="gpt-4", temperature=0) | langchain_openai.ChatOpenAI |
REGION = "us-central1" # @param {type:"string"}
INSTANCE = "test-instance" # @param {type:"string"}
DATABASE = "test" # @param {type:"string"}
TABLE_NAME = "test-default" # @param {type:"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-mysql')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
from langchain_google_cloud_sql_mysql import MySQLEngine
engine = MySQLEngine.from_instance(
project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE
)
engine.init_document_table(TABLE_NAME, overwrite_existing=True)
from langchain_core.documents import Document
from langchain_google_cloud_sql_mysql import MySQLDocumentSaver
test_docs = [
Document(
page_content="Apple Granny Smith 150 0.99 1",
metadata={"fruit_id": 1},
),
Document(
page_content="Banana Cavendish 200 0.59 0",
metadata={"fruit_id": 2},
),
Document(
page_content="Orange Navel 80 1.29 1",
metadata={"fruit_id": 3},
),
]
saver = MySQLDocumentSaver(engine=engine, table_name=TABLE_NAME)
saver.add_documents(test_docs)
from langchain_google_cloud_sql_mysql import MySQLLoader
loader = MySQLLoader(engine=engine, table_name=TABLE_NAME)
docs = loader.lazy_load()
for doc in docs:
print("Loaded documents:", doc)
from langchain_google_cloud_sql_mysql import MySQLLoader
loader = MySQLLoader(
engine=engine,
query=f"select * from `{TABLE_NAME}` where JSON_EXTRACT(langchain_metadata, '$.fruit_id') = 1;",
)
onedoc = loader.load()
onedoc
from langchain_google_cloud_sql_mysql import MySQLLoader
loader = MySQLLoader(engine=engine, table_name=TABLE_NAME)
docs = loader.load()
print("Documents before delete:", docs)
saver.delete(onedoc)
print("Documents after delete:", loader.load())
import sqlalchemy
with engine.connect() as conn:
conn.execute(sqlalchemy.text(f"DROP TABLE IF EXISTS `{TABLE_NAME}`"))
conn.commit()
conn.execute(
sqlalchemy.text(
f"""
CREATE TABLE IF NOT EXISTS `{TABLE_NAME}`(
fruit_id INT AUTO_INCREMENT PRIMARY KEY,
fruit_name VARCHAR(100) NOT NULL,
variety VARCHAR(50),
quantity_in_stock INT NOT NULL,
price_per_unit DECIMAL(6,2) NOT NULL,
organic TINYINT(1) NOT NULL
)
"""
)
)
conn.execute(
sqlalchemy.text(
f"""
INSERT INTO `{TABLE_NAME}` (fruit_name, variety, quantity_in_stock, price_per_unit, organic)
VALUES
('Apple', 'Granny Smith', 150, 0.99, 1),
('Banana', 'Cavendish', 200, 0.59, 0),
('Orange', 'Navel', 80, 1.29, 1);
"""
)
)
conn.commit()
loader = MySQLLoader(
engine=engine,
table_name=TABLE_NAME,
)
loader.load()
loader = MySQLLoader(
engine=engine,
table_name=TABLE_NAME,
content_columns=[
"variety",
"quantity_in_stock",
"price_per_unit",
"organic",
],
metadata_columns=["fruit_id", "fruit_name"],
)
loader.load()
engine.init_document_table(
TABLE_NAME,
metadata_columns=[
sqlalchemy.Column(
"fruit_name",
sqlalchemy.UnicodeText,
primary_key=False,
nullable=True,
),
sqlalchemy.Column(
"organic",
sqlalchemy.Boolean,
primary_key=False,
nullable=True,
),
],
content_column="description",
metadata_json_column="other_metadata",
overwrite_existing=True,
)
test_docs = [
Document(
page_content="Granny Smith 150 0.99",
metadata={"fruit_id": 1, "fruit_name": "Apple", "organic": 1},
),
]
saver = MySQLDocumentSaver(
engine=engine,
table_name=TABLE_NAME,
content_column="description",
metadata_json_column="other_metadata",
)
saver.add_documents(test_docs)
with engine.connect() as conn:
result = conn.execute(sqlalchemy.text(f"select * from `{TABLE_NAME}`;"))
print(result.keys())
print(result.fetchall())
loader = MySQLLoader(engine=engine, table_name=TABLE_NAME)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
summary_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory
)
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Summary",
func=summary_chain.run,
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
),
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
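# Hedged sketch (not in the source): wiring the agent, tools, and shared memory into an
# executor; the question is illustrative.
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="What is ChatGPT?")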
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
embeddings.embed_query("hello")[:3]
from langchain_community.llms import Databricks
llm = Databricks(endpoint_name="dolly")
llm("How are you?")
llm("How are you?", stop=["."])
import os
import dbutils
os.environ["DATABRICKS_TOKEN"] = dbutils.secrets.get("myworkspace", "api_token")
llm = Databricks(host="myworkspace.cloud.databricks.com", endpoint_name="dolly")
llm("How are you?")
llm = Databricks(endpoint_name="dolly", model_kwargs={"temperature": 0.1})
llm("How are you?")
def transform_input(**request):
full_prompt = f"""{request["prompt"]}
Be Concise.
"""
request["prompt"] = full_prompt
return request
llm = Databricks(endpoint_name="dolly", transform_input_fn=transform_input)
llm("How are you?")
llm = Databricks(cluster_driver_port="7777")
llm("How are you?")
llm = Databricks(cluster_id="0000-000000-xxxxxxxx", cluster_driver_port="7777")
llm("How are you?")
llm = Databricks(cluster_driver_port="7777", model_kwargs={"temperature": 0.1})
import logging
from langchain.retrievers import RePhraseQueryRetriever
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
logging.basicConfig()
logging.getLogger("langchain.retrievers.re_phraser").setLevel(logging.INFO)
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
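# Hedged sketch (not in the source): indexing the page and wrapping the vector store with a
# query-rephrasing retriever; the query text is illustrative.
all_splits = text_splitter.split_documents(data)
vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())
llm = ChatOpenAI(temperature=0)
retriever_from_llm = RePhraseQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=llm
)
docs = retriever_from_llm.get_relevant_documents(
    "Hi I'm Lance. What are the approaches to Task Decomposition?"
)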
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
protagonist_name = "Harry Potter"
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
There is one player in this game: the protagonist, {protagonist_name}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
protagonist_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the protagonist, {protagonist_name}, in {word_limit} words or less.
Speak directly to {protagonist_name}.
Do not add anything else."""
),
]
protagonist_description = ChatOpenAI(temperature=1.0)(
protagonist_specifier_prompt
).content
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
),
]
storyteller_description = ChatOpenAI(temperature=1.0)(
storyteller_specifier_prompt
).content
print("Protagonist Description:")
print(protagonist_description)
print("Storyteller Description:")
print(storyteller_description)
protagonist_system_message = SystemMessage(
content=(
f"""{game_description}
Never forget you are the protagonist, {protagonist_name}, and I am the storyteller, {storyteller_name}.
Your character description is as follows: {protagonist_description}.
You will propose actions you plan to take and I will explain what happens when you take those actions.
Speak in the first person from the perspective of {protagonist_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of {storyteller_name}.
Do not forget to finish speaking by saying, 'It is your turn, {storyteller_name}.'
Do not add anything else.
Remember you are the protagonist, {protagonist_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
)
)
storyteller_system_message = SystemMessage(
content=(
f"""{game_description}
Never forget you are the storyteller, {storyteller_name}, and I am the protagonist, {protagonist_name}.
Your character description is as follows: {storyteller_description}.
I will propose actions I plan to take and you will explain what happens when I take those actions.
Speak in the first person from the perspective of {storyteller_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of {protagonist_name}.
Do not forget to finish speaking by saying, 'It is your turn, {protagonist_name}.'
Do not add anything else.
Remember you are the storyteller, {storyteller_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
)
)
quest_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(
content=f"""{game_description}
You are the storyteller, {storyteller_name}.
Please make the quest more specific. Be creative and imaginative.
Please reply with the specified quest in {word_limit} words or less.
Speak directly to the protagonist {protagonist_name}.
Do not add anything else."""
),
]
specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content
print(f"Original quest:\n{quest}\n")
print(f"Detailed quest:\n{specified_quest}\n")
protagonist = DialogueAgent(
name=protagonist_name,
system_message=protagonist_system_message,
model=ChatOpenAI(temperature=0.2),
)
storyteller = DialogueAgent(
name=storyteller_name,
system_message=storyteller_system_message,
model= | ChatOpenAI(temperature=0.2) | langchain_openai.ChatOpenAI |
get_ipython().run_cell_magic('writefile', 'telegram_conversation.json', '{\n "name": "Jiminy",\n "type": "personal_chat",\n "id": 5965280513,\n "messages": [\n {\n "id": 1,\n "type": "message",\n "date": "2023-08-23T13:11:23",\n "date_unixtime": "1692821483",\n "from": "Jiminy Cricket",\n "from_id": "user123450513",\n "text": "You better trust your conscience",\n "text_entities": [\n {\n "type": "plain",\n "text": "You better trust your conscience"\n }\n ]\n },\n {\n "id": 2,\n "type": "message",\n "date": "2023-08-23T13:13:20",\n "date_unixtime": "1692821600",\n "from": "Batman & Robin",\n "from_id": "user6565661032",\n "text": "What did you just say?",\n "text_entities": [\n {\n "type": "plain",\n "text": "What did you just say?"\n }\n ]\n }\n ]\n}\n')
from langchain_community.chat_loaders.telegram import TelegramChatLoader
loader = TelegramChatLoader(
path="./telegram_conversation.json",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
messages: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="Jiminy Cricket")
)
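# merge_chat_runs collapses consecutive messages from the same sender into a single
# message, and map_ai_messages converts everything sent by "Jiminy Cricket" into
# AIMessage objects (other senders stay as human messages), so the resulting chat
# sessions can be used as few-shot examples or fine-tuning data for a chat model.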
from langchain_openai import ChatOpenAI
llm = | ChatOpenAI() | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from datetime import datetime
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper
from langchain_openai import OpenAI
oauth_client_id = "ABC..."
oauth_client_secret = "123..."
redirect_uri = "https://google.com"
print("Click this link, select your workspace, click `Connect Workspace`")
print(ClickupAPIWrapper.get_access_code_url(oauth_client_id, redirect_uri))
code = "THISISMYCODERIGHTHERE"
access_token = ClickupAPIWrapper.get_access_token(
oauth_client_id, oauth_client_secret, code
)
clickup_api_wrapper = ClickupAPIWrapper(access_token=access_token)
toolkit = | ClickupToolkit.from_clickup_api_wrapper(clickup_api_wrapper) | langchain_community.agent_toolkits.clickup.toolkit.ClickupToolkit.from_clickup_api_wrapper |
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Tair
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
from typing import Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
get_ipython().run_line_magic('pip', 'install faiss-cpu > /dev/null')
get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
embeddings_model = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/photos/"
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "photos.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
import os
import uuid
import chromadb
import numpy as np
from langchain_community.vectorstores import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from PIL import Image as _PILImage
vectorstore = Chroma(
collection_name="mm_rag_clip_photos", embedding_function=OpenCLIPEmbeddings()
)
image_uris = sorted(
[
os.path.join(path, image_name)
for image_name in os.listdir(path)
if image_name.endswith(".jpg")
]
)
vectorstore.add_images(uris=image_uris)
vectorstore.add_texts(texts=texts)
retriever = vectorstore.as_retriever()
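# Both the raw texts and the images (stored as base64 strings) live in the same
# CLIP-embedded Chroma collection, so a single retrieval can return a mix of the two;
# the helpers below separate them again before they are passed to the model.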
import base64
import io
from io import BytesIO
import numpy as np
from PIL import Image
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string.
Args:
base64_string (str): Base64 string of the original image.
size (tuple): Desired size of the image as (width, height).
Returns:
str: Base64 string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def is_base64(s):
"""Check if a string is Base64 encoded"""
try:
return base64.b64encode(base64.b64decode(s)) == s.encode()
except Exception:
return False
def split_image_text_types(docs):
"""Split numpy array images and texts"""
images = []
text = []
for doc in docs:
doc = doc.page_content # Extract Document contents
if is_base64(doc):
images.append(
resize_base64_image(doc, size=(250, 250))
) # base64 encoded str
else:
text.append(doc)
return {"images": images, "texts": text}
from operator import itemgetter
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
def prompt_func(data_dict):
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
if data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{data_dict['context']['images'][0]}"
},
}
messages.append(image_message)
text_message = {
"type": "text",
"text": (
"As an expert art critic and historian, your task is to analyze and interpret images, "
"considering their historical and cultural significance. Alongside the images, you will be "
"provided with related text to offer context. Both will be retrieved from a vectorstore based "
"on user-input keywords. Please use your extensive knowledge and analytical skills to provide a "
"comprehensive summary that includes:\n"
"- A detailed description of the visual elements in the image.\n"
"- The historical and cultural context of the image.\n"
"- An interpretation of the image's symbolism and meaning.\n"
"- Connections between the image and the related text.\n\n"
f"User-provided keywords: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [ | HumanMessage(content=messages) | langchain_core.messages.HumanMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-memorystore-redis')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
import redis
from langchain_google_memorystore_redis import (
DistanceStrategy,
HNSWConfig,
RedisVectorStore,
)
redis_client = redis.from_url("redis://127.0.0.1:6379")
index_config = HNSWConfig(
name="my_vector_index", distance_strategy=DistanceStrategy.COSINE, vector_size=128
)
RedisVectorStore.init_index(client=redis_client, index_config=index_config)
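# init_index creates the named HNSW vector index on the Redis instance up front;
# the documents added below are embedded and searched against this index.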
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("./state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.fake import FakeEmbeddings
embeddings = FakeEmbeddings(size=128)
redis_client = redis.from_url("redis://127.0.0.1:6379")
rvs = RedisVectorStore.from_documents(
docs, embedding=embeddings, client=redis_client, index_name="my_vector_index"
)
rvs = RedisVectorStore(
client=redis_client, index_name="my_vector_index", embeddings=embeddings
)
ids = rvs.add_texts(
texts=[d.page_content for d in docs], metadatas=[d.metadata for d in docs]
)
import pprint
query = "What did the president say about Ketanji Brown Jackson"
knn_results = rvs.similarity_search(query=query)
pprint.pprint(knn_results)
rq_results = rvs.similarity_search_with_score(query=query, distance_threshold=0.8)
pprint.pprint(rq_results)
mmr_results = rvs.max_marginal_relevance_search(query=query, lambda_mult=0.90)
pprint.pprint(mmr_results)
retriever = rvs.as_retriever()
results = retriever.invoke(query)
pprint.pprint(results)
rvs.delete(ids)
| RedisVectorStore.drop_index(client=redis_client, index_name="my_vector_index") | langchain_google_memorystore_redis.RedisVectorStore.drop_index |
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory= | ConversationBufferMemory(ai_prefix="AI Assistant") | langchain.memory.ConversationBufferMemory |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
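# The same moderation chain wraps both sides of the LLM call: it first screens the
# rendered prompt, then screens the model output, so PII or toxic content in either
# direction is caught (here the fake LLM deliberately returns PII to trigger it).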
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
unique_id="john.doe@email.com", # A unique ID
moderation_callback=my_callback, # BaseModerationCallbackHandler
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub')
import os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>"
repo_id = "google/flan-t5-xxl"
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub
template = """{question}"""
prompt = PromptTemplate.from_template(template)
llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256}
)
pii_config = ModerationPiiConfig(
labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X"
)
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8)
moderation_config_1 = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
moderation_config_2 = BaseModerationConfig(filters=[pii_config])
amazon_comp_moderation = AmazonComprehendModerationChain(
moderation_config=moderation_config_1,
client=comprehend_client,
moderation_callback=my_callback,
verbose=True,
)
amazon_comp_moderation_out = AmazonComprehendModerationChain(
moderation_config=moderation_config_2, client=comprehend_client, verbose=True
)
chain = (
prompt
| amazon_comp_moderation
| {"input": (lambda x: x["output"]) | llm}
| amazon_comp_moderation_out
)
try:
response = chain.invoke(
{
"question": """What is John Doe's address, phone number and SSN from the following text?
John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
"""
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name
region = "<REGION>" # replace with your SageMaker Endpoint region
import json
from langchain.prompts import PromptTemplate
from langchain_community.llms import SagemakerEndpoint
from langchain_community.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json["generated_texts"][0]
content_handler = ContentHandler()
template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer.
Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place.
Question: {question}
Answer:
"""
llm_prompt = PromptTemplate.from_template(template)
llm = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region,
model_kwargs={
"temperature": 0.95,
"max_length": 200,
"num_return_sequences": 3,
"top_k": 50,
"top_p": 0.95,
"do_sample": True,
},
content_handler=content_handler,
)
pii_config = | ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") | langchain_experimental.comprehend_moderation.ModerationPiiConfig |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = | RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) | langchain_text_splitters.RecursiveCharacterTextSplitter |
from langchain.chains import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_openai import ChatOpenAI
graph = Neo4jGraph(
url="bolt://localhost:7687", username="neo4j", password="pleaseletmein"
)
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
graph.refresh_schema()
print(graph.schema)
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2
)
chain.run("Who played in Top Gun?")
chain = GraphCypherQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True
)
result = chain("Who played in Top Gun?")
print(f"Intermediate steps: {result['intermediate_steps']}")
print(f"Final answer: {result['result']}")
chain = GraphCypherQAChain.from_llm(
| ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=id_key,
)
import uuid
doc_ids = [str(uuid.uuid4()) for _ in docs]
child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
sub_docs = []
for i, doc in enumerate(docs):
_id = doc_ids[i]
_sub_docs = child_text_splitter.split_documents([doc])
for _doc in _sub_docs:
_doc.metadata[id_key] = _id
sub_docs.extend(_sub_docs)
retriever.vectorstore.add_documents(sub_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
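# The vector store only indexes the small child chunks (better for similarity
# search), while the byte store keeps the full parent documents; at query time the
# retriever maps each matching chunk back to its parent via the shared "doc_id" key.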
retriever.vectorstore.similarity_search("justice breyer")[0]
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
from langchain.retrievers.multi_vector import SearchType
retriever.search_type = SearchType.mmr
len(retriever.get_relevant_documents("justice breyer")[0].page_content)
import uuid
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
chain = (
{"doc": lambda x: x.page_content}
| ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
| | ChatOpenAI(max_retries=0) | langchain_openai.ChatOpenAI |
from typing import List
from langchain.output_parsers import YamlOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
joke_query = "Tell me a joke."
parser = | YamlOutputParser(pydantic_object=Joke) | langchain.output_parsers.YamlOutputParser |
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user= | rl_chain.BasedOn("Tom") | langchain_experimental.rl_chain.BasedOn |
import uuid
from pathlib import Path
import langchain
import torch
from bs4 import BeautifulSoup as Soup
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore, LocalFileStore
from langchain_community.document_loaders.recursive_url_loader import (
RecursiveUrlLoader,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa
DOCSTORE_DIR = "."
DOCSTORE_ID_KEY = "doc_id"
loader = RecursiveUrlLoader(
"https://ar5iv.labs.arxiv.org/html/1706.03762",
max_depth=2,
extractor=lambda x: Soup(x, "html.parser").text,
)
data = loader.load()
print(f"Loaded {len(data)} documents")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
print(f"Split into {len(all_splits)} documents")
from langchain_community.embeddings import QuantizedBiEncoderEmbeddings
from langchain_core.embeddings import Embeddings
model_name = "Intel/bge-small-en-v1.5-rag-int8-static"
encode_kwargs = {"normalize_embeddings": True} # set True to compute cosine similarity
model_inc = QuantizedBiEncoderEmbeddings(
model_name=model_name,
encode_kwargs=encode_kwargs,
query_instruction="Represent this sentence for searching relevant passages: ",
)
def get_multi_vector_retriever(
docstore_id_key: str, collection_name: str, embedding_function: Embeddings
):
"""Create the composed retriever object."""
vectorstore = Chroma(
collection_name=collection_name,
embedding_function=embedding_function,
)
store = InMemoryByteStore()
return MultiVectorRetriever(
vectorstore=vectorstore,
byte_store=store,
id_key=docstore_id_key,
)
retriever = get_multi_vector_retriever(DOCSTORE_ID_KEY, "multi_vec_store", model_inc)
child_text_splitter = | RecursiveCharacterTextSplitter(chunk_size=400) | langchain_text_splitters.RecursiveCharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet amadeus > /dev/null')
import os
os.environ["AMADEUS_CLIENT_ID"] = "CLIENT_ID"
os.environ["AMADEUS_CLIENT_SECRET"] = "CLIENT_SECRET"
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
toolkit = | AmadeusToolkit() | langchain_community.agent_toolkits.amadeus.toolkit.AmadeusToolkit |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI
os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
tools = toolkit.get_tools()
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
print("Available tools:")
for tool in tools:
print("\t" + tool.name)
agent.run(
"You have the software engineering capabilities of a Google Principle engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them."
)
from langchain import hub
gh_issue_prompt_template = hub.pull("kastanday/new-github-issue")
print(gh_issue_prompt_template.template)
def format_issue(issue):
title = f"Title: {issue.get('title')}."
opened_by = f"Opened by user: {issue.get('opened_by')}"
body = f"Body: {issue.get('body')}"
comments = issue.get("comments") # often too long
return "\n".join([title, opened_by, body])
issue = github.get_issue(33) # task to implement a RNA-seq pipeline (bioinformatics)
final_gh_issue_prompt = gh_issue_prompt_template.format(
issue_description=format_issue(issue)
)
print(final_gh_issue_prompt)
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from langchain_core.prompts.chat import MessagesPlaceholder
summarizer_llm = | ChatOpenAI(temperature=0, model="gpt-3.5-turbo") | langchain_openai.ChatOpenAI |
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
| TextLoader("../../state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("embedding_distance")
evaluator.evaluate_strings(prediction="I shall go", reference="I shan't go")
evaluator.evaluate_strings(prediction="I shall go", reference="I will go")
from langchain.evaluation import EmbeddingDistance
list(EmbeddingDistance)
evaluator = load_evaluator(
"embedding_distance", distance_metric=EmbeddingDistance.EUCLIDEAN
)
from langchain_community.embeddings import HuggingFaceEmbeddings
embedding_model = HuggingFaceEmbeddings()
hf_evaluator = | load_evaluator("embedding_distance", embeddings=embedding_model) | langchain.evaluation.load_evaluator |
"""For basic init and call"""
import os
from langchain_community.embeddings import VolcanoEmbeddings
os.environ["VOLC_ACCESSKEY"] = ""
os.environ["VOLC_SECRETKEY"] = ""
embed = | VolcanoEmbeddings(volcano_ak="", volcano_sk="") | langchain_community.embeddings.VolcanoEmbeddings |
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools import BaseTool, StructuredTool, tool
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
print(multiply.name)
print(multiply.description)
print(multiply.args)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
@tool("search-tool", args_schema=SearchInput, return_direct=True)
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
print(search.name)
print(search.description)
print(search.args)
print(search.return_direct)
from typing import Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
class SearchInput(BaseModel):
query: str = Field(description="should be a search query")
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = Field(description="second number")
class CustomSearchTool(BaseTool):
name = "custom_search"
description = "useful for when you need to answer questions about current events"
args_schema: Type[BaseModel] = SearchInput
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return "LangChain"
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
class CustomCalculatorTool(BaseTool):
name = "Calculator"
description = "useful for when you need to answer questions about math"
args_schema: Type[BaseModel] = CalculatorInput
return_direct: bool = True
def _run(
self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
return a * b
async def _arun(
self,
a: int,
b: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("Calculator does not support async")
search = CustomSearchTool()
print(search.name)
print(search.description)
print(search.args)
multiply = CustomCalculatorTool()
print(multiply.name)
print(multiply.description)
print(multiply.args)
print(multiply.return_direct)
def search_function(query: str):
return "LangChain"
search = StructuredTool.from_function(
func=search_function,
name="Search",
description="useful for when you need to answer questions about current events",
)
print(search.name)
print(search.description)
print(search.args)
class CalculatorInput(BaseModel):
a: int = Field(description="first number")
b: int = | Field(description="second number") | langchain.pydantic_v1.Field |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = | TextLoader("../../../extras/modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
),
("human", "{equation_statement}"),
]
)
model = ChatOpenAI(temperature=0)
runnable = (
{"equation_statement": RunnablePassthrough()} | prompt | model | StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12"))
runnable = (
{"equation_statement": RunnablePassthrough()}
| prompt
| model.bind(stop="SOLUTION")
| StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12"))
function = {
"name": "solver",
"description": "Formulates and solves an equation",
"parameters": {
"type": "object",
"properties": {
"equation": {
"type": "string",
"description": "The algebraic expression of the equation",
},
"solution": {
"type": "string",
"description": "The solution to the equation",
},
},
"required": ["equation", "solution"],
},
}
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it.",
),
("human", "{equation_statement}"),
]
)
model = | ChatOpenAI(model="gpt-4", temperature=0) | langchain_openai.ChatOpenAI |
from langchain.output_parsers import DatetimeOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
output_parser = | DatetimeOutputParser() | langchain.output_parsers.DatetimeOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet amadeus > /dev/null')
import os
os.environ["AMADEUS_CLIENT_ID"] = "CLIENT_ID"
os.environ["AMADEUS_CLIENT_SECRET"] = "CLIENT_SECRET"
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
toolkit = AmadeusToolkit()
tools = toolkit.get_tools()
from langchain_community.llms import HuggingFaceHub
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "YOUR_HF_API_TOKEN"
llm = HuggingFaceHub(
repo_id="tiiuae/falcon-7b-instruct",
model_kwargs={"temperature": 0.5, "max_length": 64},
)
toolkit_hf = AmadeusToolkit(llm=llm)
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain.tools.render import render_text_description_and_args
from langchain_openai import ChatOpenAI
llm = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
evaluator = | load_evaluator("labeled_criteria", criteria="correctness") | langchain.evaluation.load_evaluator |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken')
from langchain_text_splitters import CharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
def extract_pdf_elements(path, fname):
"""
Extract images, tables, and chunk text from a PDF file.
path: File path, which is used to dump images (.jpg)
fname: File name
"""
return partition_pdf(
filename=path + fname,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
def categorize_elements(raw_pdf_elements):
"""
Categorize extracted elements from a PDF into tables and texts.
raw_pdf_elements: List of unstructured.documents.elements
"""
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
return texts, tables
fpath = "/Users/rlm/Desktop/cj/"
fname = "cj.pdf"
raw_pdf_elements = extract_pdf_elements(fpath, fname)
texts, tables = categorize_elements(raw_pdf_elements)
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=4000, chunk_overlap=0
)
joined_texts = " ".join(texts)
texts_4k_token = text_splitter.split_text(joined_texts)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts_4k_token, tables, summarize_texts=True
)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
def generate_img_summaries(path):
"""
Generate summaries and base64 encoded strings for images
path: Path to list of .jpg files extracted by Unstructured
"""
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
return img_base64_list, image_summaries
img_base64_list, image_summaries = generate_img_summaries(fpath)
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
"""
Create retriever that indexes summaries, but returns raw images or texts
"""
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
vectorstore = Chroma(
collection_name="mm_rag_cj_blog", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img = create_multi_vector_retriever(
vectorstore,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
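# Only the summaries (of texts, tables, and images) are embedded and searched; the
# docstore hands back the corresponding raw text, table, or base64-encoded image,
# which is what ultimately reaches the multi-modal model.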
import io
import re
from IPython.display import HTML, display
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image
def plt_img_base64(img_base64):
"""Disply base64 encoded string as image"""
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
def looks_like_base64(sb):
"""Check if the string looks like base64"""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""
Check if the base64 data is an image by looking at the start of the data
"""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def split_image_text_types(docs):
"""
Split base64-encoded images and texts
"""
b64_images = []
texts = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
doc = resize_base64_image(doc, size=(1300, 600))
b64_images.append(doc)
else:
texts.append(doc)
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
"""
Join the context into a single string
"""
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
if data_dict["context"]["images"]:
for image in data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image}"},
}
messages.append(image_message)
text_message = {
"type": "text",
"text": (
"You are financial analyst tasking with providing investment advice.\n"
"You will be given a mixed of text, tables, and image(s) usually of charts or graphs.\n"
"Use this information to provide investment advice related to the user question. \n"
f"User-provided question: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
return [HumanMessage(content=messages)]
def multi_modal_rag_chain(retriever):
"""
Multi-modal RAG chain
"""
model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024)
chain = (
{
"context": retriever | RunnableLambda(split_image_text_types),
"question": RunnablePassthrough(),
}
| | RunnableLambda(img_prompt_func) | langchain_core.runnables.RunnableLambda |
from langchain_community.document_loaders.blob_loaders.youtube_audio import (
YoutubeAudioLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import (
OpenAIWhisperParser,
OpenAIWhisperParserLocal,
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet yt_dlp')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pydub')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet librosa')
local = False
urls = ["https://youtu.be/kCc8FmEb1nY", "https://youtu.be/VMj-3S1tku0"]
save_dir = "~/Downloads/YouTube"
if local:
loader = GenericLoader(
| YoutubeAudioLoader(urls, save_dir) | langchain_community.document_loaders.blob_loaders.youtube_audio.YoutubeAudioLoader |
from typing import List
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.system_message = system_message
self.model = model
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
return self.stored_messages
def step(
self,
input_message: HumanMessage,
) -> AIMessage:
messages = self.update_messages(input_message)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
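# Each step() call appends the incoming message to the stored history, queries the
# chat model with the full history (system message first), records the reply, and
# returns it; this is what lets the two agents below alternate instruction/solution
# turns.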
import os
os.environ["OPENAI_API_KEY"] = ""
assistant_role_name = "Python Programmer"
user_role_name = "Stock Trader"
task = "Develop a trading bot for the stock market"
word_limit = 50 # word limit for task brainstorming
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
task_specifier_template = HumanMessagePromptTemplate.from_template(
template=task_specifier_prompt
)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
word_limit=word_limit,
)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(
template=assistant_inception_prompt
)
assistant_sys_msg = assistant_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(
template=user_inception_prompt
)
user_sys_msg = user_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(
assistant_role_name, user_role_name, specified_task
)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))
assistant_agent.reset()
user_agent.reset()
user_msg = HumanMessage(
content=(
f"{user_sys_msg.content}. "
"Now start to give me introductions one by one. "
"Only reply with Instruction and Input."
)
)
assistant_msg = | HumanMessage(content=f"{assistant_sys_msg.content}") | langchain.schema.HumanMessage |
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
)
sys_resp = llm_sys(
"What is bittensor and What are the potential benefits of decentralized AI?"
)
print(f"Response provided by LLM with system prompt set is : {sys_resp}")
""" {
"choices": [
{"index": Bittensor's Metagraph index number,
"uid": Unique Identifier of a miner,
"responder_hotkey": Hotkey of a miner,
"message":{"role":"assistant","content": Contains actual response},
"response_ms": Time in millisecond required to fetch response from a miner}
]
} """
multi_response_llm = NIBittensorLLM(top_responses=10)
multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?")
json_multi_resp = json.loads(multi_resp)
pprint(json_multi_resp)
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt."
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is bittensor?"
llm_chain.run(question)
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tool = Tool(
name="Google Search",
description="Search Google for recent results.",
func=search.run,
)
from langchain.agents import (
AgentExecutor,
ZeroShotAgent,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
memory = | ConversationBufferMemory(memory_key="chat_history") | langchain.memory.ConversationBufferMemory |
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm= | OpenAI() | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
len(docs)
docs[0]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir')
from langchain_community.llms import LlamaCpp
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
verbose=True,
)
llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin",
max_tokens=2048,
)
from langchain_community.llms.llamafile import Llamafile
llamafile = Llamafile()
llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
prompt = PromptTemplate.from_template(
"Summarize the main themes in these retrieved docs: {docs}"
)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = {"docs": format_docs} | prompt | llm | StrOutputParser()
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
chain.invoke(docs)
from langchain import hub
rag_prompt = hub.pull("rlm/rag-prompt")
rag_prompt.messages
from langchain_core.runnables import RunnablePassthrough, RunnablePick
chain = (
RunnablePassthrough.assign(context=RunnablePick("context") | format_docs)
| rag_prompt
| llm
| StrOutputParser()
)
chain.invoke({"context": docs, "question": question})
rag_prompt_llama = hub.pull("rlm/rag-prompt-llama")
rag_prompt_llama.messages
chain = (
RunnablePassthrough.assign(context= | RunnablePick("context") | langchain_core.runnables.RunnablePick |
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental')
get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken')
import logging
import zipfile
import requests
logging.basicConfig(level=logging.INFO)
data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip"
result = requests.get(data_url)
filename = "cj.zip"
with open(filename, "wb") as file:
file.write(result.content)
with zipfile.ZipFile(filename, "r") as zip_ref:
zip_ref.extractall()
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader("./cj/cj.pdf")
docs = loader.load()
tables = []
texts = [d.page_content for d in docs]
len(texts)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = PromptTemplate.from_template(prompt_text)
empty_response = RunnableLambda(
lambda x: AIMessage(content="Error processing document")
)
model = VertexAI(
temperature=0, model_name="gemini-pro", max_output_tokens=1024
).with_fallbacks([empty_response])
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 1})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 1})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts, tables, summarize_texts=True
)
len(text_summaries)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024)
msg = model(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
def generate_img_summaries(path):
"""
Generate summaries and base64 encoded strings for images
path: Path to list of .jpg files extracted by Unstructured
"""
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
return img_base64_list, image_summaries
img_base64_list, image_summaries = generate_img_summaries("./cj")
len(image_summaries)
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import VertexAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
"""
Create retriever that indexes summaries, but returns raw images or texts
"""
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
vectorstore = Chroma(
collection_name="mm_rag_cj_blog",
embedding_function=VertexAIEmbeddings(model_name="textembedding-gecko@latest"),
)
retriever_multi_vector_img = create_multi_vector_retriever(
vectorstore,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
import io
import re
from IPython.display import HTML, display
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image
def plt_img_base64(img_base64):
"""Disply base64 encoded string as image"""
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
def looks_like_base64(sb):
"""Check if the string looks like base64"""
return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None
def is_image_data(b64data):
"""
Check if the base64 data is an image by looking at the start of the data
"""
image_signatures = {
b"\xFF\xD8\xFF": "jpg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46\x38": "gif",
b"\x52\x49\x46\x46": "webp",
}
try:
header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes
for sig, format in image_signatures.items():
if header.startswith(sig):
return True
return False
except Exception:
return False
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def split_image_text_types(docs):
"""
Split base64-encoded images and texts
"""
b64_images = []
texts = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
if looks_like_base64(doc) and is_image_data(doc):
doc = resize_base64_image(doc, size=(1300, 600))
b64_images.append(doc)
else:
texts.append(doc)
if len(b64_images) > 0:
return {"images": b64_images[:1], "texts": []}
return {"images": b64_images, "texts": texts}
def img_prompt_func(data_dict):
"""
Join the context into a single string
"""
formatted_texts = "\n".join(data_dict["context"]["texts"])
messages = []
text_message = {
"type": "text",
"text": (
"You are financial analyst tasking with providing investment advice.\n"
"You will be given a mixed of text, tables, and image(s) usually of charts or graphs.\n"
"Use this information to provide investment advice related to the user question. \n"
f"User-provided question: {data_dict['question']}\n\n"
"Text and / or tables:\n"
f"{formatted_texts}"
),
}
messages.append(text_message)
if data_dict["context"]["images"]:
for image in data_dict["context"]["images"]:
image_message = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image}"},
}
messages.append(image_message)
return [HumanMessage(content=messages)]
def multi_modal_rag_chain(retriever):
"""
Multi-modal RAG chain
"""
model = ChatVertexAI(
temperature=0, model_name="gemini-pro-vision", max_output_tokens=1024
)
chain = (
{
"context": retriever | RunnableLambda(split_image_text_types),
"question": RunnablePassthrough(),
}
| | RunnableLambda(img_prompt_func) | langchain_core.runnables.RunnableLambda |
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorted than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \
answer the user question and provide citations. If none of the articles answer the question, just say you don't know.
Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \
justifies the answer and the ID of the quote article. Return a citation for every quote across all articles \
that justify the answer. Use the following format for your final output:
<cited_answer>
<answer></answer>
<citations>
<citation><source_id></source_id><quote></quote></citation>
<citation><source_id></source_id><quote></quote></citation>
...
</citations>
</cited_answer>
Here are the Wikipedia articles:{context}"""
prompt_3 = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{question}")]
)
from langchain_core.output_parsers import XMLOutputParser
def format_docs_xml(docs: List[Document]) -> str:
formatted = []
for i, doc in enumerate(docs):
doc_str = f"""\
<source id=\"{i}\">
<title>{doc.metadata['title']}</title>
<article_snippet>{doc.page_content}</article_snippet>
</source>"""
formatted.append(doc_str)
return "\n\n<sources>" + "\n".join(formatted) + "</sources>"
format_3 = itemgetter("docs") | RunnableLambda(format_docs_xml)
answer_3 = prompt_3 | anthropic | XMLOutputParser() | itemgetter("cited_answer")
chain_3 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_3)
.assign(cited_answer=answer_3)
.pick(["cited_answer", "docs"])
)
chain_3.invoke("How fast are cheetahs?")
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=0,
separators=["\n\n", "\n", ".", " "],
keep_separator=False,
)
compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), k=10)
def split_and_filter(input) -> List[Document]:
docs = input["docs"]
question = input["question"]
split_docs = splitter.split_documents(docs)
stateful_docs = compressor.compress_documents(split_docs, question)
return [stateful_doc for stateful_doc in stateful_docs]
retrieve = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki) | split_and_filter
)
docs = retrieve.invoke("How fast are cheetahs?")
for doc in docs:
print(doc.page_content)
print("\n\n")
chain_4 = (
RunnableParallel(question=RunnablePassthrough(), docs=retrieve)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain_4.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class annotated_answer(BaseModel):
"""Annotate the answer to the user question with quote citations that justify the answer."""
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
llm_with_tools_5 = llm.bind_tools(
[annotated_answer],
tool_choice="annotated_answer",
)
from langchain_core.prompts import MessagesPlaceholder
prompt_5 = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
MessagesPlaceholder("chat_history", optional=True),
]
)
answer_5 = prompt_5 | llm
annotation_chain = (
prompt_5
| llm_with_tools_5
| | JsonOutputKeyToolsParser(key_name="annotated_answer", return_single=True) | langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain fleet-context langchain-openai pandas faiss-cpu # faiss-gpu for CUDA supported GPU')
from operator import itemgetter
from typing import Any, Optional, Type
import pandas as pd
from langchain.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
def load_fleet_retriever(
df: pd.DataFrame,
*,
vectorstore_cls: Type[VectorStore] = FAISS,
docstore: Optional[BaseStore] = None,
**kwargs: Any,
):
vectorstore = _populate_vectorstore(df, vectorstore_cls)
if docstore is None:
return vectorstore.as_retriever(**kwargs)
else:
_populate_docstore(df, docstore)
return MultiVectorRetriever(
vectorstore=vectorstore, docstore=docstore, id_key="parent", **kwargs
)
def _populate_vectorstore(
df: pd.DataFrame,
vectorstore_cls: Type[VectorStore],
) -> VectorStore:
if not hasattr(vectorstore_cls, "from_embeddings"):
raise ValueError(
f"Incompatible vector store class {vectorstore_cls}."
"Must implement `from_embeddings` class method."
)
texts_embeddings = []
metadatas = []
for _, row in df.iterrows():
texts_embeddings.append((row.metadata["text"], row["dense_embeddings"]))
metadatas.append(row.metadata)
return vectorstore_cls.from_embeddings(
texts_embeddings,
OpenAIEmbeddings(model="text-embedding-ada-002"),
metadatas=metadatas,
)
def _populate_docstore(df: pd.DataFrame, docstore: BaseStore) -> None:
parent_docs = []
df = df.copy()
df["parent"] = df.metadata.apply(itemgetter("parent"))
for parent_id, group in df.groupby("parent"):
sorted_group = group.iloc[
group.metadata.apply(itemgetter("section_index")).argsort()
]
text = "".join(sorted_group.metadata.apply(itemgetter("text")))
metadata = {
k: sorted_group.iloc[0].metadata[k] for k in ("title", "type", "url")
}
text = metadata["title"] + "\n" + text
metadata["id"] = parent_id
parent_docs.append(Document(page_content=text, metadata=metadata))
docstore.mset(((d.metadata["id"], d) for d in parent_docs))
from context import download_embeddings
df = download_embeddings("langchain")
vecstore_retriever = load_fleet_retriever(df)
vecstore_retriever.get_relevant_documents("How does the multi vector retriever work")
from langchain.storage import InMemoryStore
parent_retriever = load_fleet_retriever(
"https://www.dropbox.com/scl/fi/4rescpkrg9970s3huz47l/libraries_langchain_release.parquet?rlkey=283knw4wamezfwiidgpgptkep&dl=1",
docstore=InMemoryStore(),
)
parent_retriever.get_relevant_documents("How does the multi vector retriever work")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are a great software engineer who is very familiar \
with Python. Given a user question or request about a new Python library called LangChain and \
parts of the LangChain documentation, answer the question or generate the requested code. \
Your answers must be accurate, should include code whenever possible, and should assume anything \
about LangChain which is note explicitly stated in the LangChain documentation. If the required \
information is not available, just say so.
LangChain Documentation
------------------
{context}""",
),
("human", "{question}"),
]
)
model = ChatOpenAI(model="gpt-3.5-turbo-16k")
chain = (
{
"question": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Vectara
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
vectara = | Vectara.from_files(["state_of_the_union.txt"]) | langchain_community.vectorstores.Vectara.from_files |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn')
from langchain_community.retrievers import TFIDFRetriever
retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"])
from langchain_core.documents import Document
retriever = TFIDFRetriever.from_documents(
[
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="world"),
Document(page_content="hello"),
| Document(page_content="foo bar") | langchain_core.documents.Document |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_community.chat_models import ChatAnthropic
from langchain_openai import ChatOpenAI
from unittest.mock import patch
import httpx
from openai import RateLimitError
request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
openai_llm = ChatOpenAI(max_retries=0)
anthropic_llm = ChatAnthropic()
llm = openai_llm.with_fallbacks([anthropic_llm])
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(openai_llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(chain.invoke({"animal": "kangaroo"}))
except RateLimitError:
print("Hit error")
from langchain_core.output_parsers import StrOutputParser
chat_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chat_model = | ChatOpenAI(model_name="gpt-fake") | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2')
import os
from langchain_community.llms import HuggingFaceTextGenInference
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
llm = HuggingFaceTextGenInference(
inference_server_url=ENDPOINT_URL,
max_new_tokens=512,
top_k=50,
temperature=0.1,
repetition_penalty=1.03,
server_kwargs={
"headers": {
"Authorization": f"Bearer {HF_TOKEN}",
"Content-Type": "application/json",
}
},
)
from langchain_community.llms import HuggingFaceEndpoint
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
llm = HuggingFaceEndpoint(
endpoint_url=ENDPOINT_URL,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 50,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain_community.llms import HuggingFaceHub
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"top_k": 30,
"temperature": 0.1,
"repetition_penalty": 1.03,
},
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_community.chat_models.huggingface import ChatHuggingFace
messages = [
SystemMessage(content="You're a helpful assistant"),
HumanMessage(
content="What happens when an unstoppable force meets an immovable object?"
),
]
chat_model = ChatHuggingFace(llm=llm)
chat_model.model_id
chat_model._to_chat_prompt(messages)
res = chat_model.invoke(messages)
print(res.content)
from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
)
from langchain.tools.render import render_text_description
from langchain_community.utilities import SerpAPIWrapper
tools = load_tools(["serpapi", "llm-math"], llm=llm)
prompt = hub.pull("hwchase17/react-json")
prompt = prompt.partial(
tools= | render_text_description(tools) | langchain.tools.render.render_text_description |
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = | NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
) | langchain_community.llms.NIBittensorLLM |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark chromadb')
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"director": "Andrei Tarkovsky",
"genre": "thriller",
"rating": 9.9,
},
),
]
vectorstore = Chroma.from_documents(docs, | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
from langchain.chains import ConversationChain
from langchain.memory import (
CombinedMemory,
ConversationBufferMemory,
ConversationSummaryMemory,
)
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
conv_memory = ConversationBufferMemory(
memory_key="chat_history_lines", input_key="input"
)
summary_memory = ConversationSummaryMemory(llm= | OpenAI() | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)
retriever = db.as_retriever()
from langchain.tools.retriever import create_retriever_tool
tool = create_retriever_tool(
retriever,
"search_state_of_union",
"Searches and returns excerpts from the 2022 State of the Union.",
)
tools = [tool]
from langchain import hub
prompt = hub.pull("hwchase17/openai-tools-agent")
prompt.messages
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
from langchain.agents import AgentExecutor, create_openai_tools_agent
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = | AgentExecutor(agent=agent, tools=tools) | langchain.agents.AgentExecutor |
import getpass
import os
os.environ["ALPHAVANTAGE_API_KEY"] = getpass.getpass()
from langchain_community.utilities.alpha_vantage import AlphaVantageAPIWrapper
alpha_vantage = | AlphaVantageAPIWrapper() | langchain_community.utilities.alpha_vantage.AlphaVantageAPIWrapper |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = | FAISS.from_documents(texts, embeddings) | langchain_community.vectorstores.FAISS.from_documents |
import asyncio
import os
import nest_asyncio
import pandas as pd
from langchain.docstore.document import Document
from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
nest_asyncio.apply()
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0)
import os
from contextlib import contextmanager
from typing import Optional
from langchain.agents import tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
ROOT_DIR = "./data/"
@contextmanager
def pushd(new_dir):
"""Context manager for changing the current working directory."""
prev_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(prev_dir)
@tool
def process_csv(
csv_file_path: str, instructions: str, output_path: Optional[str] = None
) -> str:
"""Process a CSV by with pandas in a limited REPL.\
Only use this after writing data to disk as a csv file.\
Any figures must be saved to disk to be viewed by the human.\
Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
with pushd(ROOT_DIR):
try:
df = pd.read_csv(csv_file_path)
except Exception as e:
return f"Error: {e}"
agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
if output_path is not None:
instructions += f" Save output to disk at {output_path}"
try:
result = agent.run(instructions)
return result
except Exception as e:
return f"Error: {e}"
async def async_load_playwright(url: str) -> str:
"""Load the specified URLs using Playwright and parse using BeautifulSoup."""
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)
page_source = await page.content()
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def run_async(coro):
event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro)
@tool
def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
return run_async(async_load_playwright(url))
from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain,
load_qa_with_sources_chain,
)
from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import Field
def _get_text_splitter():
return RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=20,
length_function=len,
)
class WebpageQATool(BaseTool):
name = "query_webpage"
description = (
"Browse a webpage and retrieve the information relevant to the question."
)
text_splitter: RecursiveCharacterTextSplitter = Field(
default_factory=_get_text_splitter
)
qa_chain: BaseCombineDocumentsChain
def _run(self, url: str, question: str) -> str:
"""Useful for browsing websites and scraping the text information."""
result = browse_web_page.run(url)
docs = [ | Document(page_content=result, metadata={"source": url}) | langchain.docstore.document.Document |
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
path = "/Users/rlm/Desktop/Papers/LLaVA/"
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOllama(model="llama2:13b-chat")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements if i.text != ""]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
cleaned_img_summary = [
s.split("clip_model_load: total allocated memory: 201.27 MB\n\n", 1)[1].strip()
for s in img_summaries
]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
vectorstore = Chroma(
collection_name="summaries", embedding_function=GPT4AllEmbeddings()
)
store = InMemoryStore() # <- Can we extend this to images
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(
list(zip(img_ids, cleaned_img_summary))
) # Store the image summary as the raw document
retriever.get_relevant_documents("Images / figures with playful and creative examples")[
0
]
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOllama(model="llama2:13b-chat")
chain = (
{"context": retriever, "question": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clarifai')
from getpass import getpass
CLARIFAI_PAT = getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Clarifai
from langchain_text_splitters import CharacterTextSplitter
USER_ID = "USERNAME_ID"
APP_ID = "APPLICATION_ID"
NUMBER_OF_DOCS = 2
texts = [
"I really enjoy spending time with you",
"I hate spending time with my dog",
"I want to go for a run",
"I went to the movies yesterday",
"I love playing soccer with my friends",
]
metadatas = [
{"id": i, "text": text, "source": "book 1", "category": ["books", "modern"]}
for i, text in enumerate(texts)
]
idlist = ["text1", "text2", "text3", "text4", "text5"]
metadatas = [
{"id": idlist[i], "text": text, "source": "book 1", "category": ["books", "modern"]}
for i, text in enumerate(texts)
]
clarifai_vector_db = Clarifai(
user_id=USER_ID,
app_id=APP_ID,
number_of_docs=NUMBER_OF_DOCS,
)
response = clarifai_vector_db.add_texts(texts=texts, ids=idlist, metadatas=metadatas)
response = clarifai_vector_db.add_texts(texts=texts)
clarifai_vector_db = Clarifai.from_texts(
user_id=USER_ID,
app_id=APP_ID,
texts=texts,
metadatas=metadatas,
)
docs = clarifai_vector_db.similarity_search("I would like to see you")
docs
book1_similar_docs = clarifai_vector_db.similarity_search(
"I would love to see you", filter={"source": "book 1"}
)
book_category_similar_docs = clarifai_vector_db.similarity_search(
"I would love to see you", filter={"category": ["books"]}
)
loader = TextLoader("your_local_file_path.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
history = StreamlitChatMessageHistory(key="chat_messages")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
msgs = StreamlitChatMessageHistory(key="special_app_key")
if len(msgs.messages) == 0:
msgs.add_ai_message("How can I help you?")
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are an AI chatbot having a conversation with a human."),
| MessagesPlaceholder(variable_name="history") | langchain_core.prompts.MessagesPlaceholder |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai argilla')
import os
os.environ["ARGILLA_API_URL"] = "..."
os.environ["ARGILLA_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "..."
import argilla as rg
from packaging.version import parse as parse_version
if parse_version(rg.__version__) < parse_version("1.8.0"):
raise RuntimeError(
"`FeedbackDataset` is only available in Argilla v1.8.0 or higher, please "
"upgrade `argilla` as `pip install argilla --upgrade`."
)
dataset = rg.FeedbackDataset(
fields=[
rg.TextField(name="prompt"),
rg.TextField(name="response"),
],
questions=[
rg.RatingQuestion(
name="response-rating",
description="How would you rate the quality of the response?",
values=[1, 2, 3, 4, 5],
required=True,
),
rg.TextQuestion(
name="response-feedback",
description="What feedback do you have for the response?",
required=False,
),
],
guidelines="You're asked to rate the quality of the response and provide feedback.",
)
rg.init(
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
dataset.push_to_argilla("langchain-dataset")
from langchain.callbacks import ArgillaCallbackHandler
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = | OpenAI(temperature=0.9, callbacks=callbacks) | langchain_openai.OpenAI |
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(data)
embedding = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai')
from langchain.utils.math import cosine_similarity
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{query}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{query}"""
embeddings = OpenAIEmbeddings()
prompt_templates = [physics_template, math_template]
prompt_embeddings = embeddings.embed_documents(prompt_templates)
def prompt_router(input):
query_embedding = embeddings.embed_query(input["query"])
similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]
most_similar = prompt_templates[similarity.argmax()]
print("Using MATH" if most_similar == math_template else "Using PHYSICS")
return PromptTemplate.from_template(most_similar)
chain = (
{"query": RunnablePassthrough()}
| RunnableLambda(prompt_router)
| ChatOpenAI()
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
def initialize_chain(instructions, memory=None):
if memory is None:
memory = ConversationBufferWindowMemory()
memory.ai_prefix = "Assistant"
template = f"""
Instructions: {instructions}
{{{memory.memory_key}}}
Human: {{human_input}}
Assistant:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"], template=template
)
chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(),
)
return chain
def initialize_meta_chain():
meta_template = """
Assistant has just had the below interactions with a User. Assistant followed their "Instructions" closely. Your job is to critique the Assistant's performance and then revise the Instructions so that Assistant would quickly and correctly respond in the future.
{chat_history}
Please reflect on these interactions.
You should first critique Assistant's performance. What could Assistant have done better? What should the Assistant remember about this user? Are there things this user always wants? Indicate this with "Critique: ...".
You should next revise the Instructions so that Assistant would quickly and correctly respond in the future. Assistant's goal is to satisfy the user in as few interactions as possible. Assistant will only see the new Instructions, not the interaction history, so anything important must be summarized in the Instructions. Don't forget any important details in the current Instructions! Indicate the new Instructions by "Instructions: ...".
"""
meta_prompt = PromptTemplate(
input_variables=["chat_history"], template=meta_template
)
meta_chain = LLMChain(
llm= | OpenAI(temperature=0) | langchain_openai.OpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet semanticscholar')
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an expert researcher."""
base_prompt = | hub.pull("langchain-ai/openai-functions-template") | langchain.hub.pull |
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompt_values import PromptValue
from langchain_openai import ChatOpenAI
short_context_model = ChatOpenAI(model="gpt-3.5-turbo")
long_context_model = ChatOpenAI(model="gpt-3.5-turbo-16k")
def get_context_length(prompt: PromptValue):
messages = prompt.to_messages()
tokens = short_context_model.get_num_tokens_from_messages(messages)
return tokens
prompt = PromptTemplate.from_template("Summarize this passage: {context}")
def choose_model(prompt: PromptValue):
context_len = get_context_length(prompt)
if context_len < 30:
print("short model")
return short_context_model
else:
print("long model")
return long_context_model
chain = prompt | choose_model | | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-pg langchain-google-vertexai')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
REGION = "us-central1" # @param {type: "string"}
INSTANCE = "my-pg-instance" # @param {type: "string"}
DATABASE = "my-database" # @param {type: "string"}
TABLE_NAME = "vector_store" # @param {type: "string"}
from langchain_google_cloud_sql_pg import PostgreSQLEngine
engine = await PostgreSQLEngine.afrom_instance(
project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE
)
from langchain_google_cloud_sql_pg import PostgreSQLEngine
await engine.ainit_vectorstore_table(
table_name=TABLE_NAME,
vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest)
)
get_ipython().system('gcloud services enable aiplatform.googleapis.com')
from langchain_google_vertexai import VertexAIEmbeddings
embedding = VertexAIEmbeddings(
model_name="textembedding-gecko@latest", project=PROJECT_ID
)
from langchain_google_cloud_sql_pg import PostgresVectorStore
store = await PostgresVectorStore.create( # Use .create() to initialize an async vector store
engine=engine,
table_name=TABLE_NAME,
embedding_service=embedding,
)
import uuid
all_texts = ["Apples and oranges", "Cars and airplanes", "Pineapple", "Train", "Banana"]
metadatas = [{"len": len(t)} for t in all_texts]
ids = [str(uuid.uuid4()) for _ in all_texts]
await store.aadd_texts(all_texts, metadatas=metadatas, ids=ids)
await store.adelete([ids[1]])
query = "I'd like a fruit."
docs = await store.asimilarity_search(query)
print(docs)
query_vector = embedding.embed_query(query)
docs = await store.asimilarity_search_by_vector(query_vector, k=2)
print(docs)
from langchain_google_cloud_sql_pg.indexes import IVFFlatIndex
index = IVFFlatIndex()
await store.aapply_vector_index(index)
await store.areindex() # Re-index using default index name
await store.adrop_vector_index()  # Delete index using default name
from langchain_google_cloud_sql_pg import Column
TABLE_NAME = "vectorstore_custom"
await engine.ainit_vectorstore_table(
table_name=TABLE_NAME,
vector_size=768, # VertexAI model: textembedding-gecko@latest
metadata_columns=[ | Column("len", "INTEGER") | langchain_google_cloud_sql_pg.Column |
import os
import pprint
os.environ["SERPER_API_KEY"] = ""
from langchain_community.utilities import GoogleSerperAPIWrapper
search = GoogleSerperAPIWrapper()
search.run("Obama's first name?")
os.environ["OPENAI_API_KEY"] = ""
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_openai import OpenAI
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
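# Sketch (assumed continuation): wrap the Serper search in a Tool and drive it with a
# self-ask-with-search agent; the tool name follows the convention this agent type expects.
tools = [
    Tool(
        name="Intermediate Answer",
        func=search.run,
        description="useful for when you need to ask with search",
    )
]
self_ask_with_search = initialize_agent(
    tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
self_ask_with_search.run(
    "What is the hometown of the reigning men's U.S. Open champion?"
)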
from langchain_community.document_loaders import ObsidianLoader
loader = | ObsidianLoader("<path-to-obsidian>") | langchain_community.document_loaders.ObsidianLoader |
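# Sketch: load the vault's notes into Document objects ("<path-to-obsidian>" above is a
# placeholder for your vault directory).
docs = loader.load()
print(f"Loaded {len(docs)} notes")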
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3')
from langchain.retrievers import AmazonKendraRetriever
retriever = | AmazonKendraRetriever(index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03") | langchain.retrievers.AmazonKendraRetriever |
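# Sketch: query the Kendra index (the index_id above is only an example value).
docs = retriever.get_relevant_documents("What is Amazon Kendra?")
print(docs)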
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azureml-fsspec azure-ai-generative')
from azure.ai.resources.client import AIClient
from azure.identity import DefaultAzureCredential
from langchain_community.document_loaders import AzureAIDataLoader
client = AIClient(
credential=DefaultAzureCredential(),
subscription_id="<subscription_id>",
resource_group_name="<resource_group_name>",
project_name="<project_name>",
)
data_asset = client.data.get(name="<data_asset_name>", label="latest")
loader = AzureAIDataLoader(url=data_asset.path)
loader.load()
loader = | AzureAIDataLoader(url=data_asset.path, glob="*.pdf") | langchain_community.document_loaders.AzureAIDataLoader |
get_ipython().system(' pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet')
from pprint import pprint
from docugami import Docugami
from docugami.lib.upload import upload_to_named_docset, wait_for_dgml
DOCSET_NAME = "NTSB Aviation Incident Reports"
FILE_PATHS = [
"/Users/tjaffri/ntsb/Report_CEN23LA277_192541.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA338_192753.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA363_192876.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA394_192995.pdf",
"/Users/tjaffri/ntsb/Report_ERA23LA114_106615.pdf",
"/Users/tjaffri/ntsb/Report_WPR23LA254_192532.pdf",
]
assert len(FILE_PATHS) > 5, "Please provide at least 6 files"
dg_client = Docugami()
dg_docs = upload_to_named_docset(dg_client, FILE_PATHS, DOCSET_NAME)
dgml_paths = wait_for_dgml(dg_client, dg_docs)
pprint(dgml_paths)
from pathlib import Path
from dgml_utils.segmentation import get_chunks_str
dgml_path = dgml_paths[Path(FILE_PATHS[0]).name]
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=True, # Ensures Docugami XML semantic tags are included in the chunked output (set to False for text-only chunks and tables as Markdown)
max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI.
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=False, # text-only chunks and tables as Markdown
    max_text_length=1024 * 8,  # 8k chars are ~2k tokens for OpenAI. Ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
import requests
dgml = requests.get(
"https://raw.githubusercontent.com/docugami/dgml-utils/main/python/tests/test_data/article/Jane%20Doe.xml"
).text
chunks = get_chunks_str(dgml, include_xml_tags=True)
len(chunks)
category_counts = {}
for element in chunks:
category = element.structure
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
category_counts
table_elements = [c for c in chunks if "table" in c.structure.split()]
print(f"There are {len(table_elements)} tables")
text_elements = [c for c in chunks if "table" not in c.structure.split()]
print(f"There are {len(text_elements)} text elements")
for element in text_elements[:20]:
print(element.text)
print(table_elements[0].text)
chunks_as_text = get_chunks_str(dgml, include_xml_tags=False)
table_elements_as_text = [c for c in chunks_as_text if "table" in c.structure.split()]
print(table_elements_as_text[0].text)
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = | ChatPromptTemplate.from_template(prompt_text) | langchain.prompts.ChatPromptTemplate.from_template |
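# Sketch of a typical next step (assumed, not shown in the original): summarize every text
# and table chunk with the prompt above, e.g. to index the summaries in a retriever.
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [c.text for c in text_elements]
tables = [c.text for c in table_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})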
get_ipython().run_line_magic('pip', 'install --upgrade --quiet praw')
client_id = ""
client_secret = ""
user_agent = ""
from langchain_community.tools.reddit_search.tool import RedditSearchRun
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
search = RedditSearchRun(
api_wrapper=RedditSearchAPIWrapper(
reddit_client_id=client_id,
reddit_client_secret=client_secret,
reddit_user_agent=user_agent,
)
)
from langchain_community.tools.reddit_search.tool import RedditSearchSchema
search_params = RedditSearchSchema(
query="beginner", sort="new", time_filter="week", subreddit="python", limit="2"
)
result = search.run(tool_input=search_params.dict())
print(result)
from langchain.agents import AgentExecutor, StructuredChatAgent, Tool
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain_community.tools.reddit_search.tool import RedditSearchRun
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
from langchain_openai import ChatOpenAI
client_id = ""
client_secret = ""
user_agent = ""
openai_api_key = ""
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
tools = [
RedditSearchRun(
api_wrapper=RedditSearchAPIWrapper(
reddit_client_id=client_id,
reddit_client_secret=client_secret,
reddit_user_agent=user_agent,
)
)
]
prompt = StructuredChatAgent.create_prompt(
prefix=prefix,
tools=tools,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm = | ChatOpenAI(temperature=0, openai_api_key=openai_api_key) | langchain_openai.ChatOpenAI |
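# Sketch (assumed continuation): assemble the structured-chat agent from the prompt, the
# Reddit search tool, and the shared conversation memory defined above.
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = StructuredChatAgent(llm_chain=llm_chain, verbose=True, tools=tools)
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent, verbose=True, memory=memory, tools=tools
)
agent_chain.run(input="What is the newest beginner post this week on r/python?")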
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn')
from langchain_community.retrievers import TFIDFRetriever
retriever = | TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"]) | langchain_community.retrievers.TFIDFRetriever.from_texts |
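# Sketch: retrieve the texts most similar to a query under TF-IDF scoring.
result = retriever.get_relevant_documents("foo")
print(result)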
get_ipython().run_line_magic('pip', 'install --upgrade --quiet infinopy')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet matplotlib')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import datetime as dt
import json
import time
import matplotlib.dates as md
import matplotlib.pyplot as plt
from infinopy import InfinoClient
from langchain.callbacks import InfinoCallbackHandler
from langchain_openai import OpenAI
get_ipython().system('docker run --rm --detach --name infino-example -p 3000:3000 infinohq/infino:latest')
client = InfinoClient()
data = """In what country is Normandy located?
When were the Normans in Normandy?
From which countries did the Norse originate?
Who was the Norse leader?
What century did the Normans first gain their separate identity?
Who gave their name to Normandy in the 1000's and 1100's
What is France a region of?
Who did King Charles III swear fealty to?
When did the Frankish identity emerge?
Who was the duke in the battle of Hastings?
Who ruled the duchy of Normandy
What religion were the Normans
What type of major impact did the Norman dynasty have on modern Europe?
Who was famed for their Christian spirit?
Who assimilted the Roman language?
Who ruled the country of Normandy?
What principality did William the conquerer found?
What is the original meaning of the word Norman?
When was the Latin version of the word Norman first recorded?
What name comes from the English words Normans/Normanz?"""
questions = data.split("\n")
handler = InfinoCallbackHandler(
model_id="test_openai", model_version="0.1", verbose=False
)
llm = OpenAI(temperature=0.1)
num_questions = 10
questions = questions[0:num_questions]
for question in questions:
print(question)
llm_result = llm.generate([question], callbacks=[handler])
print(llm_result)
def plot(data, title):
data = json.loads(data)
timestamps = [item["time"] for item in data]
dates = [dt.datetime.fromtimestamp(ts) for ts in timestamps]
y = [item["value"] for item in data]
plt.rcParams["figure.figsize"] = [6, 4]
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = md.DateFormatter("%Y-%m-%d %H:%M:%S")
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates, y)
plt.xlabel("Time")
plt.ylabel("Value")
plt.title(title)
plt.show()
response = client.search_ts("__name__", "latency", 0, int(time.time()))
plot(response.text, "Latency")
response = client.search_ts("__name__", "error", 0, int(time.time()))
plot(response.text, "Errors")
response = client.search_ts("__name__", "prompt_tokens", 0, int(time.time()))
plot(response.text, "Prompt Tokens")
response = client.search_ts("__name__", "completion_tokens", 0, int(time.time()))
plot(response.text, "Completion Tokens")
response = client.search_ts("__name__", "total_tokens", 0, int(time.time()))
plot(response.text, "Total Tokens")
query = "normandy"
response = client.search_log(query, 0, int(time.time()))
print("Results for", query, ":", response.text)
print("===")
query = "king charles III"
response = client.search_log("king charles III", 0, int(time.time()))
print("Results for", query, ":", response.text)
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI
handler = InfinoCallbackHandler(
model_id="test_chatopenai", model_version="0.1", verbose=False
)
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://medium.com/lyft-engineering/lyftlearn-ml-model-training-infrastructure-built-on-kubernetes-aef8218842bb",
"https://blog.langchain.dev/week-of-10-2-langchain-release-notes/",
]
for url in urls:
loader = | WebBaseLoader(url) | langchain_community.document_loaders.WebBaseLoader |
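    # Sketch (assumed continuation of the loop body): summarize each page with a ChatOpenAI
    # model whose metrics are reported to Infino via the handler defined above.
    docs = loader.load()
    llm = ChatOpenAI(temperature=0.1, callbacks=[handler])
    chain = load_summarize_chain(llm, chain_type="stuff", verbose=False)
    chain.run(docs)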
get_ipython().run_line_magic('pip', 'install --upgrade --quiet trubrics')
import os
os.environ["TRUBRICS_EMAIL"] = "***@***"
os.environ["TRUBRICS_PASSWORD"] = "***"
os.environ["OPENAI_API_KEY"] = "sk-***"
from langchain.callbacks import TrubricsCallbackHandler
from langchain_openai import OpenAI
llm = OpenAI(callbacks=[ | TrubricsCallbackHandler() | langchain.callbacks.TrubricsCallbackHandler |
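# Sketch: every generation below is logged to Trubrics by the callback handler.
res = llm.generate(["Tell me a joke", "Write me a poem about the ocean"])
print("--> GPT's joke:", res.generations[0][0].text)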
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from operator import itemgetter
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
model = | ChatOpenAI() | langchain_openai.ChatOpenAI |
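# Sketch (assumed continuation): a chat chain that pulls the conversation history out of
# memory on each call and saves the new turn afterwards.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful chatbot"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)
memory = ConversationBufferMemory(return_messages=True)
chain = (
    RunnablePassthrough.assign(
        history=RunnableLambda(memory.load_memory_variables) | itemgetter("history")
    )
    | prompt
    | model
)
inputs = {"input": "hi, I'm Bob"}
response = chain.invoke(inputs)
memory.save_context(inputs, {"output": response.content})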
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
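# Sketch (assumed continuation): declare the target schema, build a parser-aware prompt, and
# run the model so its output is validated into the Pydantic object.
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    @validator("setup")
    def question_ends_with_question_mark(cls, field):
        if field[-1] != "?":
            raise ValueError("Badly formed question!")
        return field


parser = PydanticOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})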
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_community.chat_models import ChatAnthropic
from langchain_openai import ChatOpenAI
from unittest.mock import patch
import httpx
from openai import RateLimitError
request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
openai_llm = ChatOpenAI(max_retries=0)
anthropic_llm = ChatAnthropic()
llm = openai_llm.with_fallbacks([anthropic_llm])
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(openai_llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(chain.invoke({"animal": "kangaroo"}))
except RateLimitError:
print("Hit error")
from langchain_core.output_parsers import StrOutputParser
chat_prompt = | ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
) | langchain_core.prompts.ChatPromptTemplate.from_messages |