Dataset columns: prompt (string, length 51–10k), completion (string, length 8–362), api (string, length 18–90). Each row pairs a truncated LangChain code snippet (prompt) with the expression that completes it (completion) and the fully qualified name of the completed API (api).
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vald-client-python') from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Vald from langchain_text_splitters import CharacterTextSplitter raw_documents =
TextLoader("state_of_the_union.txt")
langchain_community.document_loaders.TextLoader
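For context, the source notebook for this Vald row continues past the completion; a minimal sketch of the rest, assuming a Vald agent reachable on localhost:8080 (host and port are assumptions, not part of the row):

    documents = TextLoader("state_of_the_union.txt").load()
    texts = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
    embeddings = HuggingFaceEmbeddings()
    db = Vald.from_documents(texts, embeddings, host="localhost", port=8080)  # connect to the Vald cluster
    docs = db.similarity_search("What did the president say about Ketanji Brown Jackson?")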
import os os.environ["BING_SUBSCRIPTION_KEY"] = "<key>" os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search" from langchain_community.utilities import BingSearchAPIWrapper search = BingSearchAPIWrapper() search.run("python") search =
BingSearchAPIWrapper(k=1)
langchain_community.utilities.BingSearchAPIWrapper
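Besides run(), BingSearchAPIWrapper exposes results(), which returns per-hit metadata; a short sketch (query and result count are illustrative):

    search = BingSearchAPIWrapper()
    metadata = search.results("apples", 5)  # list of dicts with "snippet", "title", "link"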
get_ipython().run_line_magic('pip', 'install --upgrade --quiet openllm') from langchain_community.llms import OpenLLM server_url = "http://localhost:3000" # Replace with remote host if you are running on a remote server llm =
OpenLLM(server_url=server_url)
langchain_community.llms.OpenLLM
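The constructed client behaves like any other LangChain LLM; a minimal usage sketch:

    llm = OpenLLM(server_url=server_url)
    print(llm.invoke("What is the difference between a duck and a goose?"))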
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-spanner') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable spanner.googleapis.com') INSTANCE = "my-instance" # @param {type: "string"} DATABASE = "my-database" # @param {type: "string"} TABLE_NAME = "vectors_search_data" # @param {type: "string"} from langchain_google_spanner import SecondaryIndex, SpannerVectorStore, TableColumn SpannerVectorStore.init_vector_store_table( instance_id=INSTANCE, database_id=DATABASE, table_name=TABLE_NAME, id_column="row_id", metadata_columns=[ TableColumn(name="metadata", type="JSON", is_null=True), TableColumn(name="title", type="STRING(MAX)", is_null=False), ], secondary_indexes=[
SecondaryIndex(index_name="row_id_and_title", columns=["row_id", "title"])
langchain_google_spanner.SecondaryIndex
get_ipython().run_line_magic('pip', 'install --upgrade --quiet transformers --quiet') from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}, ) from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10) hf =
HuggingFacePipeline(pipeline=pipe)
langchain_community.llms.huggingface_pipeline.HuggingFacePipeline
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv') from langchain import hub from langchain.agents import AgentExecutor, create_react_agent, load_tools from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0.0) tools = load_tools( ["arxiv"], ) prompt = hub.pull("hwchase17/react") agent = create_react_agent(llm, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) agent_executor.invoke( { "input": "What's the paper 1605.08386 about?", } ) from langchain_community.utilities import ArxivAPIWrapper arxiv =
ArxivAPIWrapper()
langchain_community.utilities.ArxivAPIWrapper
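ArxivAPIWrapper.run() accepts an arXiv ID or a free-text query and returns the matching paper summaries as a string; a short sketch:

    arxiv = ArxivAPIWrapper()
    print(arxiv.run("1605.08386"))  # look a paper up by its arXiv ID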
from langchain_community.document_loaders import IFixitLoader loader = IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811") data = loader.load() data loader = IFixitLoader( "https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself" ) data = loader.load() data loader = IFixitLoader("https://www.ifixit.com/Device/Standard_iPad") data = loader.load() data data =
IFixitLoader.load_suggestions("Banana")
langchain_community.document_loaders.IFixitLoader.load_suggestions
from langchain.chains import LLMChain from langchain.memory import ConversationBufferWindowMemory from langchain.prompts import PromptTemplate from langchain_openai import OpenAI def initialize_chain(instructions, memory=None): if memory is None: memory = ConversationBufferWindowMemory() memory.ai_prefix = "Assistant" template = f""" Instructions: {instructions} {{{memory.memory_key}}} Human: {{human_input}} Assistant:""" prompt = PromptTemplate( input_variables=["history", "human_input"], template=template ) chain = LLMChain( llm=OpenAI(temperature=0), prompt=prompt, verbose=True, memory=
ConversationBufferWindowMemory()
langchain.memory.ConversationBufferWindowMemory
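Assuming initialize_chain goes on to close the LLMChain constructor and return the chain, it would be driven like this (the instruction text is illustrative):

    chain = initialize_chain("You are a helpful, concise assistant.")
    print(chain.predict(human_input="Hello, who are you?"))  # memory supplies {history}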
from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import IuguLoader iugu_loader = IuguLoader("charges") index =
VectorstoreIndexCreator()
langchain.indexes.VectorstoreIndexCreator
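The index wrapper returned by from_loaders() can be queried directly; a sketch, with OpenAI as an assumed LLM (the row itself names none):

    from langchain_openai import OpenAI
    index = VectorstoreIndexCreator().from_loaders([iugu_loader])
    answer = index.query("How many charges are there?", llm=OpenAI())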
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="john.doe@email.com", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8) moderation_config_1 = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) moderation_config_2 = BaseModerationConfig(filters=[pii_config]) amazon_comp_moderation = AmazonComprehendModerationChain( moderation_config=moderation_config_1, client=comprehend_client, moderation_callback=my_callback, verbose=True, ) amazon_comp_moderation_out = AmazonComprehendModerationChain( moderation_config=moderation_config_2, client=comprehend_client, verbose=True ) chain = ( prompt | amazon_comp_moderation | {"input": (lambda x: x["output"]) | llm} | amazon_comp_moderation_out ) try: response = chain.invoke( { "question": """What is John Doe's address, phone number and SSN from the following text? John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. """ } ) except Exception as e: print(str(e)) else: print(response["output"]) endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name region = "<REGION>" # replace with your SageMaker Endpoint region import json from langchain.prompts import PromptTemplate from langchain_community.llms import SagemakerEndpoint from langchain_community.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes: input_str = json.dumps({"text_inputs": prompt, **model_kwargs}) return input_str.encode("utf-8") def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json["generated_texts"][0] content_handler = ContentHandler() template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer. Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at johndoe@example.com reminding him of an old acquaintance's reunion. 
As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. Question: {question} Answer: """ llm_prompt = PromptTemplate.from_template(template) llm = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region, model_kwargs={ "temperature": 0.95, "max_length": 200, "num_return_sequences": 3, "top_k": 50, "top_p": 0.95, "do_sample": True, }, content_handler=content_handler, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config_1 =
BaseModerationConfig(filters=[pii_config, toxicity_config])
langchain_experimental.comprehend_moderation.BaseModerationConfig
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-datastore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable datastore.googleapis.com') from langchain_core.documents import Document from langchain_google_datastore import DatastoreSaver data = [Document(page_content="Hello, World!")] saver = DatastoreSaver() saver.upsert_documents(data) saver = DatastoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = DatastoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_datastore import DatastoreLoader loader_collection = DatastoreLoader("Collection") loader_subcollection = DatastoreLoader("Collection/doc/SubCollection") data_collection = loader_collection.load() data_subcollection = loader_subcollection.load() from google.cloud import datastore client = datastore.Client() doc_ref = client.collection("foo").document("bar") loader_document =
DatastoreLoader(doc_ref)
langchain_google_datastore.DatastoreLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from operator import itemgetter from langchain.output_parsers import JsonOutputToolsParser from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough from langchain_core.tools import tool from langchain_openai import ChatOpenAI @tool def count_emails(last_n_days: int) -> int: """Dummy function to count the number of e-mails received in the last n days.""" return last_n_days * 2 @tool def send_email(message: str, recipient: str) -> str: "Dummy function to send an e-mail." return f"Successfully sent email to {recipient}." tools = [count_emails, send_email] model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0).bind_tools(tools) def call_tool(tool_invocation: dict) -> Runnable: """Function for dynamically constructing the end of the chain based on the model-selected tool.""" tool_map = {tool.name: tool for tool in tools} tool = tool_map[tool_invocation["type"]] return RunnablePassthrough.assign(output=itemgetter("args") | tool) call_tool_list = RunnableLambda(call_tool).map() chain = model |
JsonOutputToolsParser()
langchain.output_parsers.JsonOutputToolsParser
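The parsed tool calls are routed to the matching tool by call_tool_list; completing the chain as in the source notebook:

    chain = model | JsonOutputToolsParser() | call_tool_list
    chain.invoke("How many emails did I get in the last 5 days?")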
import zipfile import requests def download_and_unzip(url: str, output_path: str = "file.zip") -> None: file_id = url.split("/")[-2] download_url = f"https://drive.google.com/uc?export=download&id={file_id}" response = requests.get(download_url) if response.status_code != 200: print("Failed to download the file.") return with open(output_path, "wb") as file: file.write(response.content) print(f"File {output_path} downloaded.") with zipfile.ZipFile(output_path, "r") as zip_ref: zip_ref.extractall() print(f"File {output_path} has been unzipped.") url = ( "https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing" ) download_and_unzip(url) directory_path = "./hogwarts" from langchain_community.chat_loaders.facebook_messenger import ( FolderFacebookMessengerChatLoader, SingleFileFacebookMessengerChatLoader, ) loader = SingleFileFacebookMessengerChatLoader( path="./hogwarts/inbox/HermioneGranger/messages_Hermione_Granger.json", ) chat_session = loader.load()[0] chat_session["messages"][:3] loader = FolderFacebookMessengerChatLoader( path="./hogwarts", ) chat_sessions = loader.load() len(chat_sessions) from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) merged_sessions =
merge_chat_runs(chat_sessions)
langchain_community.chat_loaders.utils.merge_chat_runs
import os os.environ["LANGCHAIN_PROJECT"] = "movie-qa" import pandas as pd df = pd.read_csv("data/imdb_top_1000.csv") df["Released_Year"] = df["Released_Year"].astype(int, errors="ignore") from langchain.schema import Document from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() records = df.to_dict("records") documents = [
Document(page_content=d["Overview"], metadata=d)
langchain.schema.Document
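Closing the comprehension and loading the records into Chroma would look roughly like:

    documents = [Document(page_content=d["Overview"], metadata=d) for d in records]
    vectorstore = Chroma.from_documents(documents, embeddings)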
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI template = """Answer the users question based only on the following context: <context> {context} </context> Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0) search = DuckDuckGoSearchAPIWrapper() def retriever(query): return search.run(query) chain = ( {"context": retriever, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
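The mapping of runnables feeds the prompt, so the finished chain reads (the question is illustrative):

    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
    chain.invoke("What is LangChain?")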
from langchain_community.document_loaders import AsyncChromiumLoader from langchain_community.document_transformers import BeautifulSoupTransformer loader =
AsyncChromiumLoader(["https://www.wsj.com"])
langchain_community.document_loaders.AsyncChromiumLoader
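The loaded HTML is typically passed through BeautifulSoupTransformer to keep only interesting tags; a sketch (the tag list is illustrative):

    docs = loader.load()
    bs_transformer = BeautifulSoupTransformer()
    docs_transformed = bs_transformer.transform_documents(docs, tags_to_extract=["p", "span"])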
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-memorystore-redis') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() import redis from langchain_google_memorystore_redis import ( DistanceStrategy, HNSWConfig, RedisVectorStore, ) redis_client = redis.from_url("redis://127.0.0.1:6379") index_config = HNSWConfig( name="my_vector_index", distance_strategy=DistanceStrategy.COSINE, vector_size=128 )
RedisVectorStore.init_index(client=redis_client, index_config=index_config)
langchain_google_memorystore_redis.RedisVectorStore.init_index
from langchain_community.document_transformers.openai_functions import ( create_metadata_tagger, ) from langchain_core.documents import Document from langchain_openai import ChatOpenAI schema = { "properties": { "movie_title": {"type": "string"}, "critic": {"type": "string"}, "tone": {"type": "string", "enum": ["positive", "negative"]}, "rating": { "type": "integer", "description": "The number of stars the critic rated the movie", }, }, "required": ["movie_title", "critic", "tone"], } llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613") document_transformer = create_metadata_tagger(metadata_schema=schema, llm=llm) original_documents = [ Document( page_content="Review of The Bee Movie\nBy Roger Ebert\n\nThis is the greatest movie ever made. 4 out of 5 stars." ), Document( page_content="Review of The Godfather\nBy Anonymous\n\nThis movie was super boring. 1 out of 5 stars.", metadata={"reliable": False}, ), ] enhanced_documents = document_transformer.transform_documents(original_documents) import json print( *[d.page_content + "\n\n" + json.dumps(d.metadata) for d in enhanced_documents], sep="\n\n---------------\n\n", ) from typing import Literal from pydantic import BaseModel, Field class Properties(BaseModel): movie_title: str critic: str tone: Literal["positive", "negative"] rating: int = Field(description="Rating out of 5 stars") document_transformer =
create_metadata_tagger(Properties, llm)
langchain_community.document_transformers.openai_functions.create_metadata_tagger
from langchain_community.document_loaders import OBSDirectoryLoader endpoint = "your-endpoint" config = {"ak": "your-access-key", "sk": "your-secret-key"} loader = OBSDirectoryLoader("your-bucket-name", endpoint=endpoint, config=config) loader.load() loader = OBSDirectoryLoader( "your-bucket-name", endpoint=endpoint, config=config, prefix="test_prefix" ) loader.load() config = {"get_token_from_ecs": True} loader =
OBSDirectoryLoader("your-bucket-name", endpoint=endpoint, config=config)
langchain_community.document_loaders.OBSDirectoryLoader
from langchain_community.document_loaders.obs_file import OBSFileLoader endpoint = "your-endpoint" from obs import ObsClient obs_client = ObsClient( access_key_id="your-access-key", secret_access_key="your-secret-key", server=endpoint, ) loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client) loader.load() config = {"ak": "your-access-key", "sk": "your-secret-key"} loader = OBSFileLoader( "your-bucket-name", "your-object-key", endpoint=endpoint, config=config ) loader.load() config = {"get_token_from_ecs": True} loader = OBSFileLoader( "your-bucket-name", "your-object-key", endpoint=endpoint, config=config ) loader.load() loader =
OBSFileLoader("your-bucket-name", "your-object-key", endpoint=endpoint)
langchain_community.document_loaders.obs_file.OBSFileLoader
from ray import serve from starlette.requests import Request @serve.deployment class LLMServe: def __init__(self) -> None: pass async def __call__(self, request: Request) -> str: return "Hello World" deployment = LLMServe.bind() serve.api.run(deployment) serve.api.shutdown() from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_openai import OpenAI from getpass import getpass OPENAI_API_KEY = getpass() @serve.deployment class DeployLLM: def __init__(self): llm = OpenAI(openai_api_key=OPENAI_API_KEY) template = "Question: {question}\n\nAnswer: Let's think step by step." prompt = PromptTemplate.from_template(template) self.chain =
LLMChain(llm=llm, prompt=prompt)
langchain.chains.LLMChain
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.evaluation import load_evaluator eval_chain = load_evaluator("pairwise_string") from langchain.evaluation.loading import load_dataset dataset = load_dataset("langchain-howto-queries") from langchain.agents import AgentType, Tool, initialize_agent from langchain_community.utilities import SerpAPIWrapper from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613") search =
SerpAPIWrapper()
langchain_community.utilities.SerpAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain nuclia') from langchain_community.vectorstores.nucliadb import NucliaDB API_KEY = "YOUR_API_KEY" ndb =
NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key=API_KEY)
langchain_community.vectorstores.nucliadb.NucliaDB
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch') path = "/Users/rlm/Desktop/photos/" from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "photos.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) tables = [] texts = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): tables.append(str(element)) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): texts.append(str(element)) import os import uuid import chromadb import numpy as np from langchain_community.vectorstores import Chroma from langchain_experimental.open_clip import OpenCLIPEmbeddings from PIL import Image as _PILImage vectorstore = Chroma( collection_name="mm_rag_clip_photos", embedding_function=OpenCLIPEmbeddings() ) image_uris = sorted( [ os.path.join(path, image_name) for image_name in os.listdir(path) if image_name.endswith(".jpg") ] ) vectorstore.add_images(uris=image_uris) vectorstore.add_texts(texts=texts) retriever = vectorstore.as_retriever() import base64 import io from io import BytesIO import numpy as np from PIL import Image def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string. Args: base64_string (str): Base64 string of the original image. size (tuple): Desired size of the image as (width, height). Returns: str: Base64 string of the resized image. """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode("utf-8") def is_base64(s): """Check if a string is Base64 encoded""" try: return base64.b64encode(base64.b64decode(s)) == s.encode() except Exception: return False def split_image_text_types(docs): """Split numpy array images and texts""" images = [] text = [] for doc in docs: doc = doc.page_content # Extract Document contents if is_base64(doc): images.append( resize_base64_image(doc, size=(250, 250)) ) # base64 encoded str else: text.append(doc) return {"images": images, "texts": text} from operator import itemgetter from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_openai import ChatOpenAI def prompt_func(data_dict): formatted_texts = "\n".join(data_dict["context"]["texts"]) messages = [] if data_dict["context"]["images"]: image_message = { "type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{data_dict['context']['images'][0]}" }, } messages.append(image_message) text_message = { "type": "text", "text": ( "As an expert art critic and historian, your task is to analyze and interpret images, " "considering their historical and cultural significance. Alongside the images, you will be " "provided with related text to offer context. Both will be retrieved from a vectorstore based " "on user-input keywords. 
Please use your extensive knowledge and analytical skills to provide a " "comprehensive summary that includes:\n" "- A detailed description of the visual elements in the image.\n" "- The historical and cultural context of the image.\n" "- An interpretation of the image's symbolism and meaning.\n" "- Connections between the image and the related text.\n\n" f"User-provided keywords: {data_dict['question']}\n\n" "Text and / or tables:\n" f"{formatted_texts}" ), } messages.append(text_message) return [
HumanMessage(content=messages)
langchain_core.messages.HumanMessage
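prompt_func builds the message list for a vision-capable chat model; wiring it into the RAG chain follows the usual LCEL shape (the model name is an assumption):

    model = ChatOpenAI(temperature=0, model="gpt-4-vision-preview", max_tokens=1024)
    chain = (
        {
            "context": retriever | RunnableLambda(split_image_text_types),
            "question": RunnablePassthrough(),
        }
        | RunnableLambda(prompt_func)
        | model
        | StrOutputParser()
    )
    chain.invoke("Woman with children")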
import os from langchain.indexes import VectorstoreIndexCreator from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_community.document_loaders.figma import FigmaFileLoader from langchain_openai import ChatOpenAI figma_loader = FigmaFileLoader( os.environ.get("ACCESS_TOKEN"), os.environ.get("NODE_IDS"), os.environ.get("FILE_KEY"), ) index = VectorstoreIndexCreator().from_loaders([figma_loader]) figma_doc_retriever = index.vectorstore.as_retriever() def generate_code(human_input): system_prompt_template = """You are expert coder Jon Carmack. Use the provided design context to create idiomatic HTML/CSS code as possible based on the user request. Everything must be inline in one file and your response must be directly renderable by the browser. Figma file nodes and metadata: {context}""" human_prompt_template = "Code the {text}. Ensure it's mobile responsive" system_message_prompt = SystemMessagePromptTemplate.from_template( system_prompt_template ) human_message_prompt = HumanMessagePromptTemplate.from_template( human_prompt_template ) gpt_4 = ChatOpenAI(temperature=0.02, model_name="gpt-4") relevant_nodes = figma_doc_retriever.get_relevant_documents(human_input) conversation = [system_message_prompt, human_message_prompt] chat_prompt =
ChatPromptTemplate.from_messages(conversation)
langchain.prompts.chat.ChatPromptTemplate.from_messages
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser() for txt in chain.stream({"input": "What's your name?"}): print(txt, end="") prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert coding AI. Respond only in valid python; no narration whatsoever.", ), ("user", "{input}"), ] ) chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser() for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}): print(txt, end="") from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="nemotron_steerlm_8b") complex_result = llm.invoke( "What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0} ) print("Un-creative\n") print(complex_result.content) print("\n\nCreative\n") creative_result = llm.invoke( "What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9} ) print(creative_result.content) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = ( prompt | ChatNVIDIA(model="nemotron_steerlm_8b").bind( labels={"creativity": 9, "complexity": 0, "verbosity": 9} ) | StrOutputParser() ) for txt in chain.stream({"input": "Why is a PB&J?"}): print(txt, end="") import IPython import requests image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image image_content = requests.get(image_url).content IPython.display.Image(image_content) from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="playground_neva_22b") from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ], labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0}, ) import IPython import requests image_url = "https://picsum.photos/seed/kitten/300/200" image_content = requests.get(image_url).content IPython.display.Image(image_content) import 
base64 from langchain_core.messages import HumanMessage b64_string = base64.b64encode(image_content).decode("utf-8") llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, { "type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}, }, ] ) ] ) base64_with_mime_type = f"data:image/png;base64,{b64_string}" llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />') from langchain_nvidia_ai_endpoints import ChatNVIDIA kosmos = ChatNVIDIA(model="kosmos_2") from langchain_core.messages import HumanMessage def drop_streaming_key(d): """Takes in payload dictionary, outputs new payload dictionary""" if "stream" in d: d.pop("stream") return d kosmos = ChatNVIDIA(model="kosmos_2") kosmos.client.payload_fn = drop_streaming_key kosmos.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) import base64 from io import BytesIO from PIL import Image img_gen = ChatNVIDIA(model="sdxl_turbo") def to_sdxl_payload(d): if d: d = {"prompt": d.get("messages", [{}])[0].get("content")} d["inference_steps"] = 4 ## why not add another argument? return d img_gen.client.payload_fn = to_sdxl_payload def to_pil_img(d): return Image.open(BytesIO(base64.b64decode(d))) (img_gen | StrOutputParser() | to_pil_img).invoke("white cat playing") from langchain_core.messages import ChatMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [ ChatMessage( role="context", content="Parrots and Cats have signed the peace accord." ), ("user", "{input}"), ] ) llm = ChatNVIDIA(model="nemotron_qa_8b") chain = prompt | llm |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
from langchain_community.chat_models import ChatDatabricks from langchain_core.messages import HumanMessage from mlflow.deployments import get_deploy_client client = get_deploy_client("databricks") secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope name = "my-chat" # rename this if my-chat already exists client.create_endpoint( name=name, config={ "served_entities": [ { "name": "my-chat", "external_model": { "name": "gpt-4", "provider": "openai", "task": "llm/v1/chat", "openai_config": { "openai_api_key": "{{" + secret + "}}", }, }, } ], }, ) chat = ChatDatabricks( target_uri="databricks", endpoint=name, temperature=0.1, ) chat([HumanMessage(content="hello")]) from langchain_community.embeddings import DatabricksEmbeddings embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en") embeddings.embed_query("hello")[:3] from langchain_community.llms import Databricks llm =
Databricks(endpoint_name="dolly")
langchain_community.llms.Databricks
model_url = "http://localhost:5000" from langchain.chains import LLMChain from langchain.globals import set_debug from langchain.prompts import PromptTemplate from langchain_community.llms import TextGen set_debug(True) template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) llm = TextGen(model_url=model_url) llm_chain = LLMChain(prompt=prompt, llm=llm) question = "What NFL team won the Super Bowl in the year Justin Bieber was born?" llm_chain.run(question) model_url = "ws://localhost:5005" from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chains import LLMChain from langchain.globals import set_debug from langchain.prompts import PromptTemplate from langchain_community.llms import TextGen set_debug(True) template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) llm = TextGen( model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()] ) llm_chain = LLMChain(prompt=prompt, llm=llm) question = "What NFL team won the Super Bowl in the year Justin Bieber was born?" llm_chain.run(question) llm =
TextGen(model_url=model_url, streaming=True)
langchain_community.llms.TextGen
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python') import os from langchain.callbacks import ContextCallbackHandler token = os.environ["CONTEXT_API_TOKEN"] context_callback = ContextCallbackHandler(token) import os from langchain.callbacks import ContextCallbackHandler from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_openai import ChatOpenAI token = os.environ["CONTEXT_API_TOKEN"] chat = ChatOpenAI( headers={"user_id": "123"}, temperature=0, callbacks=[
ContextCallbackHandler(token)
langchain.callbacks.ContextCallbackHandler
get_ipython().run_line_magic('pip', 'install --upgrade --quiet docx2txt') from langchain_community.document_loaders import Docx2txtLoader loader = Docx2txtLoader("example_data/fake.docx") data = loader.load() data from langchain_community.document_loaders import UnstructuredWordDocumentLoader loader = UnstructuredWordDocumentLoader("example_data/fake.docx") data = loader.load() data loader =
UnstructuredWordDocumentLoader("example_data/fake.docx", mode="elements")
langchain_community.document_loaders.UnstructuredWordDocumentLoader
import os import chromadb from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain.retrievers.merger_retriever import MergerRetriever from langchain_community.document_transformers import ( EmbeddingsClusteringFilter, EmbeddingsRedundantFilter, ) from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings all_mini = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") multi_qa_mini = HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-dot-v1") filter_embeddings = OpenAIEmbeddings() ABS_PATH = os.path.dirname(os.path.abspath(__file__)) DB_DIR = os.path.join(ABS_PATH, "db") client_settings = chromadb.config.Settings( is_persistent=True, persist_directory=DB_DIR, anonymized_telemetry=False, ) db_all = Chroma( collection_name="project_store_all", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=all_mini, ) db_multi_qa = Chroma( collection_name="project_store_multi", persist_directory=DB_DIR, client_settings=client_settings, embedding_function=multi_qa_mini, ) retriever_all = db_all.as_retriever( search_type="similarity", search_kwargs={"k": 5, "include_metadata": True} ) retriever_multi_qa = db_multi_qa.as_retriever( search_type="mmr", search_kwargs={"k": 5, "include_metadata": True} ) lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa]) filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings) pipeline = DocumentCompressorPipeline(transformers=[filter]) compression_retriever = ContextualCompressionRetriever( base_compressor=pipeline, base_retriever=lotr ) filter_ordered_cluster = EmbeddingsClusteringFilter( embeddings=filter_embeddings, num_clusters=10, num_closest=1, ) filter_ordered_by_retriever = EmbeddingsClusteringFilter( embeddings=filter_embeddings, num_clusters=10, num_closest=1, sorted=True, ) pipeline = DocumentCompressorPipeline(transformers=[filter_ordered_by_retriever]) compression_retriever = ContextualCompressionRetriever( base_compressor=pipeline, base_retriever=lotr ) from langchain_community.document_transformers import LongContextReorder filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings) reordering =
LongContextReorder()
langchain_community.document_transformers.LongContextReorder
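The reorderer slots into the same compressor pipeline so the most relevant documents land at the start and end of the context window:

    pipeline = DocumentCompressorPipeline(transformers=[filter, reordering])
    compression_retriever_reordered = ContextualCompressionRetriever(
        base_compressor=pipeline, base_retriever=lotr
    )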
get_ipython().system(' docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:23.4.2.11') get_ipython().run_line_magic('pip', 'install --upgrade --quiet clickhouse-connect') import getpass import os if not os.environ.get("OPENAI_API_KEY"): os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.vectorstores import Clickhouse, ClickhouseSettings from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() for d in docs: d.metadata = {"some": "metadata"} settings = ClickhouseSettings(table="clickhouse_vector_search_example") docsearch =
Clickhouse.from_documents(docs, embeddings, config=settings)
langchain_community.vectorstores.Clickhouse.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence_transformers') from langchain_community.embeddings import HuggingFaceEmbeddings embeddings = HuggingFaceEmbeddings() from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=0) docs = text_splitter.split_documents(documents) print(len(docs)) import getpass import os os.environ["SEMADB_API_KEY"] = getpass.getpass("SemaDB API Key:") from langchain_community.vectorstores import SemaDB from langchain_community.vectorstores.utils import DistanceStrategy db =
SemaDB("mycollection", 768, embeddings, DistanceStrategy.COSINE)
langchain_community.vectorstores.SemaDB
from langchain_experimental.llm_bash.base import LLMBashChain from langchain_openai import OpenAI llm = OpenAI(temperature=0) text = "Please write a bash script that prints 'Hello World' to the console." bash_chain = LLMBashChain.from_llm(llm, verbose=True) bash_chain.run(text) from langchain.prompts.prompt import PromptTemplate from langchain_experimental.llm_bash.prompt import BashOutputParser _PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format: Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'" I need to take the following actions: - List all files in the directory - Create a new directory - Copy the files from the first directory into the second directory ```bash ls mkdir myNewDirectory cp -r target/* myNewDirectory ``` Do not use 'echo' when writing the script. That is the format. Begin! Question: {question}""" PROMPT = PromptTemplate( input_variables=["question"], template=_PROMPT_TEMPLATE, output_parser=
BashOutputParser()
langchain_experimental.llm_bash.prompt.BashOutputParser
from langchain.memory import ConversationKGMemory from langchain_openai import OpenAI llm = OpenAI(temperature=0) memory = ConversationKGMemory(llm=llm) memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"}) memory.save_context({"input": "sam is a friend"}, {"output": "okay"}) memory.load_memory_variables({"input": "who is sam"}) memory = ConversationKGMemory(llm=llm, return_messages=True) memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"}) memory.save_context({"input": "sam is a friend"}, {"output": "okay"}) memory.load_memory_variables({"input": "who is sam"}) memory.get_current_entities("what's Sams favorite color?") memory.get_knowledge_triplets("her favorite color is red") llm = OpenAI(temperature=0) from langchain.chains import ConversationChain from langchain.prompts.prompt import PromptTemplate template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate. Relevant Information: {history} Conversation: Human: {input} AI:""" prompt = PromptTemplate(input_variables=["history", "input"], template=template) conversation_with_kg = ConversationChain( llm=llm, verbose=True, prompt=prompt, memory=
ConversationKGMemory(llm=llm)
langchain.memory.ConversationKGMemory
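With the KG-backed memory attached, the conversation chain is driven through predict(); the inputs below are illustrative:

    conversation_with_kg.predict(input="Hi, what's up?")
    conversation_with_kg.predict(input="My name is James and I'm helping Will. He's an engineer.")
    conversation_with_kg.predict(input="What do you know about Will?")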
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-storage') from langchain_community.document_loaders import GCSDirectoryLoader loader =
GCSDirectoryLoader(project_name="aist", bucket="testing-hwc")
langchain_community.document_loaders.GCSDirectoryLoader
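load() returns one Document per blob; a prefix narrows the listing, as in the GCS docs:

    loader.load()
    loader = GCSDirectoryLoader(project_name="aist", bucket="testing-hwc", prefix="fake")
    loader.load()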
get_ipython().run_line_magic('pip', 'install --upgrade --quiet flashrank') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss_cpu') def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter documents = TextLoader( "../../modules/state_of_the_union.txt", ).load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) embedding = OpenAIEmbeddings(model="text-embedding-ada-002") retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={"k": 20}) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import FlashrankRerank from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) compressor =
FlashrankRerank()
langchain.retrievers.document_compressors.FlashrankRerank
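The reranker is then wrapped in a ContextualCompressionRetriever so the top-20 FAISS hits are reordered before use:

    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    compressed_docs = compression_retriever.get_relevant_documents(query)
    pretty_print_docs(compressed_docs)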
get_ipython().run_line_magic('pip', 'install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2') import os from langchain_community.llms import HuggingFaceTextGenInference ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") llm = HuggingFaceTextGenInference( inference_server_url=ENDPOINT_URL, max_new_tokens=512, top_k=50, temperature=0.1, repetition_penalty=1.03, server_kwargs={ "headers": { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json", } }, ) from langchain_community.llms import HuggingFaceEndpoint ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" llm = HuggingFaceEndpoint( endpoint_url=ENDPOINT_URL, task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 50, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain_community.llms import HuggingFaceHub llm = HuggingFaceHub( repo_id="HuggingFaceH4/zephyr-7b-beta", task="text-generation", model_kwargs={ "max_new_tokens": 512, "top_k": 30, "temperature": 0.1, "repetition_penalty": 1.03, }, ) from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_community.chat_models.huggingface import ChatHuggingFace messages = [ SystemMessage(content="You're a helpful assistant"), HumanMessage( content="What happens when an unstoppable force meets an immovable object?" ), ] chat_model = ChatHuggingFace(llm=llm) chat_model.model_id chat_model._to_chat_prompt(messages) res = chat_model.invoke(messages) print(res.content) from langchain import hub from langchain.agents import AgentExecutor, load_tools from langchain.agents.format_scratchpad import format_log_to_str from langchain.agents.output_parsers import ( ReActJsonSingleInputOutputParser, ) from langchain.tools.render import render_text_description from langchain_community.utilities import SerpAPIWrapper tools = load_tools(["serpapi", "llm-math"], llm=llm) prompt = hub.pull("hwchase17/react-json") prompt = prompt.partial( tools=
render_text_description(tools)
langchain.tools.render.render_text_description
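The partial prompt still needs the tool names, after which the ReAct agent is assembled around the chat model; roughly (the final question is illustrative):

    prompt = prompt.partial(
        tools=render_text_description(tools),
        tool_names=", ".join([t.name for t in tools]),
    )
    chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        | prompt
        | chat_model_with_stop
        | ReActJsonSingleInputOutputParser()
    )
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    agent_executor.invoke({"input": "What is 2 to the power of 10?"})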
get_ipython().run_line_magic('pip', 'install --upgrade --quiet fastembed') from langchain_community.embeddings.fastembed import FastEmbedEmbeddings embeddings =
FastEmbedEmbeddings()
langchain_community.embeddings.fastembed.FastEmbedEmbeddings
get_ipython().system('pip install gymnasium') import tenacity from langchain.output_parsers import RegexParser from langchain.schema import ( HumanMessage, SystemMessage, ) class GymnasiumAgent: @classmethod def get_docs(cls, env): return env.unwrapped.__doc__ def __init__(self, model, env): self.model = model self.env = env self.docs = self.get_docs(env) self.instructions = """ Your goal is to maximize your return, i.e. the sum of the rewards you receive. I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as: Observation: <observation> Reward: <reward> Termination: <termination> Truncation: <truncation> Return: <sum_of_rewards> You will respond with an action, formatted as: Action: <action> where you replace <action> with your actual action. Do nothing else but return the action. """ self.action_parser = RegexParser( regex=r"Action: (.*)", output_keys=["action"], default_output_key="action" ) self.message_history = [] self.ret = 0 def random_action(self): action = self.env.action_space.sample() return action def reset(self): self.message_history = [ SystemMessage(content=self.docs), SystemMessage(content=self.instructions), ] def observe(self, obs, rew=0, term=False, trunc=False, info=None): self.ret += rew obs_message = f""" Observation: {obs} Reward: {rew} Termination: {term} Truncation: {trunc} Return: {self.ret} """ self.message_history.append(
HumanMessage(content=obs_message)
langchain.schema.HumanMessage
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-firestore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable firestore.googleapis.com') from langchain_core.documents.base import Document from langchain_google_firestore import FirestoreSaver saver = FirestoreSaver() data = [Document(page_content="Hello, World!")] saver.upsert_documents(data) saver = FirestoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = FirestoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_firestore import FirestoreLoader loader_collection = FirestoreLoader("Collection") loader_subcollection = FirestoreLoader("Collection/doc/SubCollection") data_collection = loader_collection.load() data_subcollection = loader_subcollection.load() from google.cloud import firestore client = firestore.Client() doc_ref = client.collection("foo").document("bar") loader_document = FirestoreLoader(doc_ref) data = loader_document.load() from google.cloud.firestore import CollectionGroup, FieldFilter, Query col_ref = client.collection("col_group") collection_group = CollectionGroup(col_ref) loader_group = FirestoreLoader(collection_group) col_ref = client.collection("collection") query = col_ref.where(filter=FieldFilter("region", "==", "west_coast")) loader_query =
FirestoreLoader(query)
langchain_google_firestore.FirestoreLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckdb') from langchain_community.document_loaders import DuckDBLoader get_ipython().run_cell_magic('file', 'example.csv', 'Team,Payroll\nNationals,81.34\nReds,82.20\n') loader =
DuckDBLoader("SELECT * FROM read_csv_auto('example.csv')")
langchain_community.document_loaders.DuckDBLoader
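Column routing works on the same loader; per the DuckDB docs, selected columns can become page content or metadata:

    data = loader.load()
    loader = DuckDBLoader(
        "SELECT * FROM read_csv_auto('example.csv')",
        page_content_columns=["Team"],
        metadata_columns=["Payroll"],
    )
    data = loader.load()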
get_ipython().system('pip install -qU langchain-ibm') import os from getpass import getpass watsonx_api_key = getpass() os.environ["WATSONX_APIKEY"] = watsonx_api_key import os os.environ["WATSONX_URL"] = "your service instance url" os.environ["WATSONX_TOKEN"] = "your token for accessing the CPD cluster" os.environ["WATSONX_PASSWORD"] = "your password for accessing the CPD cluster" os.environ["WATSONX_USERNAME"] = "your username for accessing the CPD cluster" os.environ["WATSONX_INSTANCE_ID"] = "your instance_id for accessing the CPD cluster" parameters = { "decoding_method": "sample", "max_new_tokens": 100, "min_new_tokens": 1, "temperature": 0.5, "top_k": 50, "top_p": 1, } from langchain_ibm import WatsonxLLM watsonx_llm = WatsonxLLM( model_id="ibm/granite-13b-instruct-v2", url="https://us-south.ml.cloud.ibm.com", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) watsonx_llm = WatsonxLLM( model_id="ibm/granite-13b-instruct-v2", url="PASTE YOUR URL HERE", username="PASTE YOUR USERNAME HERE", password="PASTE YOUR PASSWORD HERE", instance_id="openshift", version="4.8", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) watsonx_llm = WatsonxLLM( deployment_id="PASTE YOUR DEPLOYMENT_ID HERE", url="https://us-south.ml.cloud.ibm.com", project_id="PASTE YOUR PROJECT_ID HERE", params=parameters, ) from langchain.prompts import PromptTemplate template = "Generate a random question about {topic}: Question: " prompt = PromptTemplate.from_template(template) from langchain.chains import LLMChain llm_chain =
LLMChain(prompt=prompt, llm=watsonx_llm)
langchain.chains.LLMChain
from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import Vectara from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableLambda, RunnablePassthrough vectara = Vectara.from_files(["state_of_the_union.txt"]) summary_config = {"is_enabled": True, "max_results": 5, "response_lang": "eng"} retriever = vectara.as_retriever( search_kwargs={"k": 3, "summary_config": summary_config} ) def get_sources(documents): return documents[:-1] def get_summary(documents): return documents[-1].page_content query_str = "what did Biden say?" (retriever | get_summary).invoke(query_str) (retriever | get_sources).invoke(query_str) from langchain.retrievers.multi_query import MultiQueryRetriever from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) mqr =
MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)
langchain.retrievers.multi_query.MultiQueryRetriever.from_llm
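The multi-query retriever composes with the same summary/sources helpers defined above:

    (mqr | get_summary).invoke(query_str)
    (mqr | get_sources).invoke(query_str)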
from langchain.indexes import SQLRecordManager, index from langchain_core.documents import Document from langchain_elasticsearch import ElasticsearchStore from langchain_openai import OpenAIEmbeddings collection_name = "test_index" embedding = OpenAIEmbeddings() vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding ) namespace = f"elasticsearch/{collection_name}" record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql" ) record_manager.create_schema() doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"}) doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) def _clear(): """Hacky helper method to clear content. See the `full` mode section to to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source", ) _clear() index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
langchain.indexes.index
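A note on the row above: `index` returns a dict of bookkeeping counts, which is the easiest way to see the deduplication at work. A minimal sketch reusing the row's objects (the printed counts are illustrative, not guaranteed):
_clear()
result = index(
    [doc1, doc1, doc2],  # duplicate doc1 entries are deduplicated within the batch
    record_manager,
    vectorstore,
    cleanup="incremental",  # "incremental" also deletes stale versions sharing a source
    source_id_key="source",
)
print(result)  # e.g. {'num_added': 2, 'num_updated': 0, 'num_skipped': 1, 'num_deleted': 0}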
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet') import os from langchain_community.document_loaders import DocugamiLoader DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY") docset_id = "26xpy3aes7xp" document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"] loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids) chunks = loader.load() len(chunks) loader.min_text_length = 64 loader.include_xml_tags = True chunks = loader.load() for chunk in chunks[:5]: print(chunk) get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib') loader =
DocugamiLoader(docset_id="zo954yqy53wp")
langchain_community.document_loaders.DocugamiLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss') get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu') import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import CohereEmbeddings from langchain_community.vectorstores import FAISS from langchain_text_splitters import RecursiveCharacterTextSplitter documents = TextLoader("../../modules/state_of_the_union.txt").load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever( search_kwargs={"k": 20} ) query = "What did the president say about Ketanji Brown Jackson" docs = retriever.get_relevant_documents(query) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import CohereRerank from langchain_community.llms import Cohere llm = Cohere(temperature=0) compressor =
CohereRerank()
langchain.retrievers.document_compressors.CohereRerank
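The row imports `ContextualCompressionRetriever` but the prompt stops at the compressor; a plausible continuation, wiring the reranker in as a document compressor (names reused from the row):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor,  # the CohereRerank instance from the completion
    base_retriever=retriever,
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(compressed_docs)  # the 20 retrieved docs, reordered and cut down by Cohere's reranker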
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain-experimental langchain-openai') from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ( ChatPromptTemplate, ) from langchain_experimental.utilities import PythonREPL from langchain_openai import ChatOpenAI template = """Write some python code to solve the user's problem. Return only python code in Markdown format, e.g.: ```python .... ```""" prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")]) model = ChatOpenAI() def _sanitize_output(text: str): _, after = text.split("```python") return after.split("```")[0] chain = prompt | model | StrOutputParser() | _sanitize_output |
PythonREPL()
langchain_experimental.utilities.PythonREPL
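As completed, the chain pipes into a bare `PythonREPL()` instance, which is not itself callable in LCEL; the safer pattern is to pipe into its `run` method so the sanitized code string is actually executed. A hedged sketch:
repl = PythonREPL()
chain = prompt | model | StrOutputParser() | _sanitize_output | repl.run
print(chain.invoke({"input": "whats 2 plus 2"}))  # runs the extracted code, returns captured stdout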
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opencv-python scikit-image') import os from langchain_openai import OpenAI os.environ["OPENAI_API_KEY"] = "<your-key-here>" from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper from langchain_openai import OpenAI llm =
OpenAI(temperature=0.9)
langchain_openai.OpenAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, Make up your best guess.""" messages = [ SystemMessagePromptTemplate.from_template(system_template), HumanMessagePromptTemplate.from_template("{question}"), ] prompt = ChatPromptTemplate.from_messages(messages) chain_type_kwargs = {"prompt": prompt} llm = ChatOpenAI( model_name="gpt-3.5-turbo", # Modify model_name if you have access to GPT-4 temperature=0, max_tokens=256, ) chain = LLMChain( llm=llm, prompt=prompt, verbose=False, ) def print_result_simple(query): result = chain(query) output_text = f"""### Question: {query} {result['text']} """ display(Markdown(output_text)) print_result_simple("How many databases can be in a Yellowbrick Instance?") print_result_simple("What's an easy way to add users in bulk to Yellowbrick?") try: conn = psycopg2.connect(yellowbrick_connection_string) except psycopg2.Error as e: print(f"Error connecting to the database: {e}") exit(1) cursor = conn.cursor() create_table_query = f""" CREATE TABLE if not exists {embedding_table} ( id uuid, embedding_id integer, text character varying(60000), metadata character varying(1024), embedding double precision ) DISTRIBUTE ON (id); truncate table {embedding_table}; """ try: cursor.execute(create_table_query) print(f"Table '{embedding_table}' created successfully!") except psycopg2.Error as e: print(f"Error creating table: {e}") conn.rollback() conn.commit() cursor.close() conn.close() yellowbrick_doc_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YB_DOC_DATABASE}" ) conn = psycopg2.connect(yellowbrick_doc_connection_string) cursor = conn.cursor() query = f"SELECT path, document FROM {YB_DOC_TABLE}" cursor.execute(query) yellowbrick_documents = cursor.fetchall() print(f"Extracted {len(yellowbrick_documents)} documents successfully!") cursor.close() conn.close() DOCUMENT_BASE_URL = "https://docs.yellowbrick.com/6.7.1/" # Actual URL separator = "\n## " # This separator assumes Markdown docs from the repo uses ### as logical main header most of the time chunk_size_limit = 2000 max_chunk_overlap = 200 documents = [ Document( page_content=document[1], metadata={"source": DOCUMENT_BASE_URL + 
document[0].replace(".md", ".html")}, ) for document in yellowbrick_documents ] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size_limit, chunk_overlap=max_chunk_overlap, separators=[separator, "\nn", "\n", ",", " ", ""], ) split_docs = text_splitter.split_documents(documents) docs_text = [doc.page_content for doc in split_docs] embeddings = OpenAIEmbeddings() vector_store = Yellowbrick.from_documents( documents=split_docs, embedding=embeddings, connection_string=yellowbrick_connection_string, table=embedding_table, ) print(f"Created vector store with {len(documents)} documents") system_template = """Use the following pieces of context to answer the users question. Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources. If you don't know the answer, just say that "I don't know", don't try to make up an answer. ---------------- {summaries}""" messages = [ SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}")
langchain.prompts.chat.HumanMessagePromptTemplate.from_template
from langchain_community.tools.edenai import ( EdenAiExplicitImageTool, EdenAiObjectDetectionTool, EdenAiParsingIDTool, EdenAiParsingInvoiceTool, EdenAiSpeechToTextTool, EdenAiTextModerationTool, EdenAiTextToSpeechTool, ) from langchain.agents import AgentType, initialize_agent from langchain_community.llms import EdenAI llm = EdenAI( feature="text", provider="openai", params={"temperature": 0.2, "max_tokens": 250} ) tools = [
EdenAiTextModerationTool(providers=["openai"], language="en")
langchain_community.tools.edenai.EdenAiTextModerationTool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai.chat_models import ChatOpenAI model = ChatOpenAI() prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at {ability}. Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ] ) runnable = prompt | model from langchain_community.chat_message_histories import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory store = {} def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) with_message_history.invoke( {"ability": "math", "input": "What does cosine mean?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "def234"}}, ) from langchain_core.runnables import ConfigurableFieldSpec store = {} def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory: if (user_id, conversation_id) not in store: store[(user_id, conversation_id)] = ChatMessageHistory() return store[(user_id, conversation_id)] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", history_factory_config=[ ConfigurableFieldSpec( id="user_id", annotation=str, name="User ID", description="Unique identifier for the user.", default="", is_shared=True, ), ConfigurableFieldSpec( id="conversation_id", annotation=str, name="Conversation ID", description="Unique identifier for the conversation.", default="", is_shared=True, ), ], ) with_message_history.invoke( {"ability": "math", "input": "Hello"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}}, ) from langchain_core.messages import HumanMessage from langchain_core.runnables import RunnableParallel chain = RunnableParallel({"output_message": ChatOpenAI()}) def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] =
ChatMessageHistory()
langchain_community.chat_message_histories.ChatMessageHistory
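The row's last cell builds a `RunnableParallel` chain whose output is a dict; the likely continuation wraps it with history via `output_messages_key`, so the stored AI reply is read from that key (the session id is a placeholder):
with_message_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    output_messages_key="output_message",  # where the AI message lives in the dict output
)
with_message_history.invoke(
    [HumanMessage(content="What does cosine mean?")],
    config={"configurable": {"session_id": "baz"}},
)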
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-datastore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable datastore.googleapis.com') from langchain_core.documents import Document from langchain_google_datastore import DatastoreSaver data = [Document(page_content="Hello, World!")] saver = DatastoreSaver() saver.upsert_documents(data) saver = DatastoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = DatastoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_datastore import DatastoreLoader loader_collection = DatastoreLoader("Collection") loader_subcollection =
DatastoreLoader("Collection/doc/SubCollection")
langchain_google_datastore.DatastoreLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet supabase') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["SUPABASE_URL"] = getpass.getpass("Supabase URL:") os.environ["SUPABASE_SERVICE_KEY"] = getpass.getpass("Supabase Service Key:") from dotenv import load_dotenv load_dotenv() import os from langchain_community.vectorstores import SupabaseVectorStore from langchain_openai import OpenAIEmbeddings from supabase.client import Client, create_client supabase_url = os.environ.get("SUPABASE_URL") supabase_key = os.environ.get("SUPABASE_SERVICE_KEY") supabase: Client = create_client(supabase_url, supabase_key) embeddings = OpenAIEmbeddings() from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter =
CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
langchain_text_splitters.CharacterTextSplitter
from langchain.chains import LLMMathChain from langchain_community.utilities import DuckDuckGoSearchAPIWrapper from langchain_core.tools import Tool from langchain_experimental.plan_and_execute import ( PlanAndExecute, load_agent_executor, load_chat_planner, ) from langchain_openai import ChatOpenAI, OpenAI search = DuckDuckGoSearchAPIWrapper() llm = OpenAI(temperature=0) llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True) tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="Calculator", func=llm_math_chain.run, description="useful for when you need to answer questions about math", ), ] model = ChatOpenAI(temperature=0) planner =
load_chat_planner(model)
langchain_experimental.plan_and_execute.load_chat_planner
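To close the loop on this row's imports, a sketch of the remaining plan-and-execute wiring (the question is illustrative):
executor = load_agent_executor(model, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)
agent.run(
    "Who is the current UK prime minister? What is their age raised to the 0.43 power?"
)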
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from operator import itemgetter from langchain.output_parsers import JsonOutputToolsParser from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough from langchain_core.tools import tool from langchain_openai import ChatOpenAI @tool def count_emails(last_n_days: int) -> int: """Count the number of emails received in the last n days.""" return last_n_days * 2 @tool def send_email(message: str, recipient: str) -> str: """Send an email with the given message to the given recipient.""" return f"Successfully sent email to {recipient}." tools = [count_emails, send_email] model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0).bind_tools(tools) def call_tool(tool_invocation: dict) -> Runnable: """Function for dynamically constructing the end of the chain based on the model-selected tool.""" tool_map = {tool.name: tool for tool in tools} tool = tool_map[tool_invocation["type"]] return RunnablePassthrough.assign(output=itemgetter("args") | tool) call_tool_list = RunnableLambda(call_tool).map() chain = model | JsonOutputToolsParser() | call_tool_list chain.invoke("how many emails did i get in the last 5 days?") import json def human_approval(tool_invocations: list) -> list: tool_strs = "\n\n".join( json.dumps(tool_call, indent=2) for tool_call in tool_invocations ) msg = ( f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n" "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no." ) resp = input(msg) if resp.lower() not in ("yes", "y"): raise ValueError(f"Tool invocations not approved:\n\n{tool_strs}") return tool_invocations chain = model |
JsonOutputToolsParser()
langchain.output_parsers.JsonOutputToolsParser
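The completion stops before the approval gate is wired in; the obvious next step inserts `human_approval` between parsing and execution, reusing the helper defined in the row:
chain = model | JsonOutputToolsParser() | human_approval | call_tool_list
chain.invoke("how many emails did i get in the last 5 days?")  # blocks on input() until 'Y'/'Yes'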
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vald-client-python') from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Vald from langchain_text_splitters import CharacterTextSplitter raw_documents = TextLoader("state_of_the_union.txt").load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) documents = text_splitter.split_documents(raw_documents) embeddings = HuggingFaceEmbeddings() db = Vald.from_documents(documents, embeddings, host="localhost", port=8080) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query) docs[0].page_content embedding_vector = embeddings.embed_query(query) docs = db.similarity_search_by_vector(embedding_vector) docs[0].page_content docs_and_scores = db.similarity_search_with_score(query) docs_and_scores[0] retriever = db.as_retriever(search_type="mmr") retriever.get_relevant_documents(query) db.max_marginal_relevance_search(query, k=2, fetch_k=10) import grpc with open("test_root_cacert.crt", "rb") as root: credentials = grpc.ssl_channel_credentials(root_certificates=root.read()) with open(".ztoken", "rb") as ztoken: token = ztoken.read().strip() metadata = [(b"athenz-role-auth", token)] from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Vald from langchain_text_splitters import CharacterTextSplitter raw_documents = TextLoader("state_of_the_union.txt").load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) documents = text_splitter.split_documents(raw_documents) embeddings =
HuggingFaceEmbeddings()
langchain_community.embeddings.HuggingFaceEmbeddings
import os import pprint os.environ["SERPER_API_KEY"] = "" from langchain_community.utilities import GoogleSerperAPIWrapper search = GoogleSerperAPIWrapper() search.run("Obama's first name?") os.environ["OPENAI_API_KEY"] = "" from langchain.agents import AgentType, Tool, initialize_agent from langchain_community.utilities import GoogleSerperAPIWrapper from langchain_openai import OpenAI llm = OpenAI(temperature=0) search =
GoogleSerperAPIWrapper()
langchain_community.utilities.GoogleSerperAPIWrapper
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain.prompts import PromptTemplate from langchain_community.utilities import GoogleSearchAPIWrapper from langchain_openai import OpenAI template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory ) search = GoogleSearchAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ), Tool( name="Summary", func=summary_chain.run, description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.", ), ] prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:""" suffix = """Begin! {chat_history} Question: {input} {agent_scratchpad}""" prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, input_variables=["input", "chat_history", "agent_scratchpad"], ) llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt) agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True) agent_chain = AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, verbose=True, memory=memory ) agent_chain.run(input="What is ChatGPT?") agent_chain.run(input="Who developed it?") agent_chain.run( input="Thanks. Summarize the conversation for my 5-year-old daughter." ) print(agent_chain.memory.buffer) template = """This is a conversation between a human and a bot: {chat_history} Write a summary of the conversation for {input}: """ prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template) memory = ConversationBufferMemory(memory_key="chat_history") summary_chain = LLMChain( llm=OpenAI(), prompt=prompt, verbose=True, memory=memory, # <--- this is the only change ) search =
GoogleSearchAPIWrapper()
langchain_community.utilities.GoogleSearchAPIWrapper
get_ipython().system(' pip install --quiet pypdf chromadb tiktoken openai langchain-together') from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("~/Desktop/mixtral.pdf") data = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter =
RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
langchain_text_splitters.RecursiveCharacterTextSplitter
get_ipython().run_line_magic('pip', 'install --upgrade --quiet runhouse') import runhouse as rh from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False) template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) llm = SelfHostedHuggingFaceLLM( model_id="gpt2", hardware=gpu, model_reqs=["pip:./", "transformers", "torch"] ) llm_chain = LLMChain(prompt=prompt, llm=llm) question = "What NFL team won the Super Bowl in the year Justin Beiber was born?" llm_chain.run(question) llm = SelfHostedHuggingFaceLLM( model_id="google/flan-t5-small", task="text2text-generation", hardware=gpu, ) llm("What is the capital of Germany?") def load_pipeline(): from transformers import ( AutoModelForCausalLM, AutoTokenizer, pipeline, ) model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) return pipe def inference_fn(pipeline, prompt, stop=None): return pipeline(prompt)[0]["generated_text"][len(prompt) :] llm = SelfHostedHuggingFaceLLM( model_load_fn=load_pipeline, hardware=gpu, inference_fn=inference_fn ) llm("Who is the current US president?") pipeline = load_pipeline() llm = SelfHostedPipeline.from_pipeline( pipeline=pipeline, hardware=gpu, model_reqs=["pip:./", "transformers", "torch"] ) import pickle rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to( gpu, path="models" ) llm =
SelfHostedPipeline.from_pipeline(pipeline="models/pipeline.pkl", hardware=gpu)
langchain_community.llms.SelfHostedPipeline.from_pipeline
get_ipython().run_line_magic('pip', 'install --upgrade --quiet openllm') from langchain_community.llms import OpenLLM server_url = "http://localhost:3000" # Replace with remote host if you are running on a remote server llm = OpenLLM(server_url=server_url) from langchain_community.llms import OpenLLM llm = OpenLLM( model_name="dolly-v2", model_id="databricks/dolly-v2-3b", temperature=0.94, repetition_penalty=1.2, ) from langchain.chains import LLMChain from langchain.prompts import PromptTemplate template = "What is a good name for a company that makes {product}?" prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp') from langchain.agents import AgentExecutor, OpenAIFunctionsAgent from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI from langchain_robocorp import ActionServerToolkit llm = ChatOpenAI(model="gpt-4", temperature=0) toolkit = ActionServerToolkit(url="http://localhost:8080", report_trace=True) tools = toolkit.get_tools() system_message =
SystemMessage(content="You are a helpful assistant")
langchain_core.messages.SystemMessage
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-cloud-bigquery') from langchain_community.document_loaders import BigQueryLoader BASE_QUERY = """ SELECT id, dna_sequence, organism FROM ( SELECT ARRAY ( SELECT AS STRUCT 1 AS id, "ATTCGA" AS dna_sequence, "Lokiarchaeum sp. (strain GC14_75)." AS organism UNION ALL SELECT AS STRUCT 2 AS id, "AGGCGA" AS dna_sequence, "Heimdallarchaeota archaeon (strain LC_2)." AS organism UNION ALL SELECT AS STRUCT 3 AS id, "TCCGGA" AS dna_sequence, "Acidianus hospitalis (strain W1)." AS organism) AS new_array), UNNEST(new_array) """ loader = BigQueryLoader(BASE_QUERY) data = loader.load() print(data) loader = BigQueryLoader( BASE_QUERY, page_content_columns=["dna_sequence", "organism"], metadata_columns=["id"], ) data = loader.load() print(data) ALIASED_QUERY = """ SELECT id, dna_sequence, organism, id as source FROM ( SELECT ARRAY ( SELECT AS STRUCT 1 AS id, "ATTCGA" AS dna_sequence, "Lokiarchaeum sp. (strain GC14_75)." AS organism UNION ALL SELECT AS STRUCT 2 AS id, "AGGCGA" AS dna_sequence, "Heimdallarchaeota archaeon (strain LC_2)." AS organism UNION ALL SELECT AS STRUCT 3 AS id, "TCCGGA" AS dna_sequence, "Acidianus hospitalis (strain W1)." AS organism) AS new_array), UNNEST(new_array) """ loader =
BigQueryLoader(ALIASED_QUERY, metadata_columns=["source"])
langchain_community.document_loaders.BigQueryLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_core.tools import tool @tool def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> float: """Do something complex with a complex tool.""" return int_arg * float_arg from langchain_openai import ChatOpenAI model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) model_with_tools = model.bind_tools( [complex_tool], tool_choice="complex_tool", ) from operator import itemgetter from langchain.output_parsers import JsonOutputKeyToolsParser from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough chain = ( model_with_tools | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | complex_tool ) chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" ) from typing import Any from langchain_core.runnables import RunnableConfig def try_except_tool(tool_args: dict, config: RunnableConfig) -> Any: try: return complex_tool.invoke(tool_args, config=config) except Exception as e: return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}" chain = ( model_with_tools | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True) | try_except_tool ) print( chain.invoke( "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg" ) ) chain = ( model_with_tools |
JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
langchain.output_parsers.JsonOutputKeyToolsParser
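A common follow-up to the try/except pattern is a fallback to a stronger model; a hedged sketch (the gpt-4 model name is an assumption, substitute any model you can access):
base_chain = (
    model_with_tools
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
    | complex_tool
)
better_model = ChatOpenAI(model="gpt-4", temperature=0).bind_tools(
    [complex_tool], tool_choice="complex_tool"
)
better_chain = (
    better_model
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
    | complex_tool
)
chain_with_fallback = base_chain.with_fallbacks([better_chain])  # better_chain runs only if base_chain errors
chain_with_fallback.invoke(
    "use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)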
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-pg langchain-google-vertexai') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable sqladmin.googleapis.com') REGION = "us-central1" # @param {type: "string"} INSTANCE = "my-pg-instance" # @param {type: "string"} DATABASE = "my-database" # @param {type: "string"} TABLE_NAME = "vector_store" # @param {type: "string"} from langchain_google_cloud_sql_pg import PostgreSQLEngine engine = await PostgreSQLEngine.afrom_instance( project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE ) from langchain_google_cloud_sql_pg import PostgreSQLEngine await engine.ainit_vectorstore_table( table_name=TABLE_NAME, vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest) ) get_ipython().system('gcloud services enable aiplatform.googleapis.com') from langchain_google_vertexai import VertexAIEmbeddings embedding = VertexAIEmbeddings( model_name="textembedding-gecko@latest", project=PROJECT_ID ) from langchain_google_cloud_sql_pg import PostgresVectorStore store = await PostgresVectorStore.create( # Use .create() to initialize an async vector store engine=engine, table_name=TABLE_NAME, embedding_service=embedding, ) import uuid all_texts = ["Apples and oranges", "Cars and airplanes", "Pineapple", "Train", "Banana"] metadatas = [{"len": len(t)} for t in all_texts] ids = [str(uuid.uuid4()) for _ in all_texts] await store.aadd_texts(all_texts, metadatas=metadatas, ids=ids) await store.adelete([ids[1]]) query = "I'd like a fruit." docs = await store.asimilarity_search(query) print(docs) query_vector = embedding.embed_query(query) docs = await store.asimilarity_search_by_vector(query_vector, k=2) print(docs) from langchain_google_cloud_sql_pg.indexes import IVFFlatIndex index = IVFFlatIndex() await store.aapply_vector_index(index) await store.areindex() # Re-index using default index name await store.aadrop_vector_index() # Delete index using default name from langchain_google_cloud_sql_pg import Column TABLE_NAME = "vectorstore_custom" await engine.ainit_vectorstore_table( table_name=TABLE_NAME, vector_size=768, # VertexAI model: textembedding-gecko@latest metadata_columns=[
Column("len", "INTEGER")
langchain_google_cloud_sql_pg.Column
from langchain_community.document_loaders import HuggingFaceDatasetLoader dataset_name = "imdb" page_content_column = "text" loader = HuggingFaceDatasetLoader(dataset_name, page_content_column) data = loader.load() data[:15] from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders.hugging_face_dataset import ( HuggingFaceDatasetLoader, ) dataset_name = "tweet_eval" page_content_column = "text" name = "stance_climate" loader =
HuggingFaceDatasetLoader(dataset_name, page_content_column, name)
langchain_community.document_loaders.hugging_face_dataset.HuggingFaceDatasetLoader
from langchain_community.utils.openai_functions import ( convert_pydantic_to_openai_function, ) from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field, validator from langchain_openai import ChatOpenAI class Joke(BaseModel): """Joke to tell user.""" setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") openai_functions = [convert_pydantic_to_openai_function(Joke)] model = ChatOpenAI(temperature=0) prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful assistant"), ("user", "{input}")] ) from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser parser =
JsonOutputFunctionsParser()
langchain.output_parsers.openai_functions.JsonOutputFunctionsParser
get_ipython().run_line_magic('pip', 'install --upgrade --quiet semanticscholar') from langchain import hub from langchain.agents import AgentExecutor, create_openai_functions_agent from langchain_openai import ChatOpenAI instructions = """You are an expert researcher.""" base_prompt =
hub.pull("langchain-ai/openai-functions-template")
langchain.hub.pull
get_ipython().system('pip install --upgrade volcengine') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain.document_loaders import TextLoader from langchain.vectorstores.vikingdb import VikingDB, VikingDBConfig from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loader =
TextLoader("./test.txt")
langchain.document_loaders.TextLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus') import os OPENAI_API_KEY = "Use your OpenAI key:)" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain_community.vectorstores import Milvus from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() docs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "action"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "genre": "thriller", "rating": 8.2}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "rating": 8.3, "genre": "drama"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={"year": 1979, "rating": 9.9, "genre": "science fiction"}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "genre": "thriller", "rating": 9.0}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated", "rating": 9.3}, ), ] vector_store =
Milvus.from_documents( docs, embedding=embeddings, connection_args={"uri": "Use your uri:)"}, )
langchain_community.vectorstores.Milvus.from_documents
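This row installs `lark`, the dependency for self-querying; a sketch of how the vector store would typically feed a `SelfQueryRetriever` (the attribute descriptions are assumptions inferred from the row's metadata):
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI

metadata_field_info = [
    AttributeInfo(name="genre", description="The genre of the movie", type="string"),
    AttributeInfo(name="year", description="The year the movie was released", type="integer"),
    AttributeInfo(name="rating", description="A 1-10 rating for the movie", type="float"),
]
retriever = SelfQueryRetriever.from_llm(
    OpenAI(temperature=0),
    vector_store,
    "Brief summary of a movie",  # description of the document contents
    metadata_field_info,
    verbose=True,
)
retriever.get_relevant_documents("What are some movies rated higher than 9?")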
get_ipython().system('poetry run pip -q install psychicapi') from langchain_community.document_loaders import PsychicLoader from psychicapi import ConnectorId google_drive_loader = PsychicLoader( api_key="7ddb61c1-8b6a-4d31-a58e-30d1c9ea480e", connector_id=ConnectorId.gdrive.value, connection_id="google-test", ) documents = google_drive_loader.load() from langchain.chains import RetrievalQAWithSourcesChain from langchain_community.vectorstores import Chroma from langchain_openai import OpenAI, OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() docsearch =
Chroma.from_documents(texts, embeddings)
langchain_community.vectorstores.Chroma.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"') import os from getpass import getpass from datasets import ( load_dataset, ) from langchain_community.document_loaders import PyPDFLoader from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ") embe = OpenAIEmbeddings() from langchain_community.vectorstores import Cassandra from cassandra.cluster import Cluster cluster = Cluster(["127.0.0.1"]) session = cluster.connect() import cassio CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ") cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE) vstore = Cassandra( embedding=embe, table_name="cassandra_vector_demo", ) ASTRA_DB_ID = input("ASTRA_DB_ID = ") ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ") desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ") if desired_keyspace: ASTRA_DB_KEYSPACE = desired_keyspace else: ASTRA_DB_KEYSPACE = None import cassio cassio.init( database_id=ASTRA_DB_ID, token=ASTRA_DB_APPLICATION_TOKEN, keyspace=ASTRA_DB_KEYSPACE, ) vstore = Cassandra( embedding=embe, table_name="cassandra_vector_demo", ) philo_dataset = load_dataset("datastax/philosopher-quotes")["train"] docs = [] for entry in philo_dataset: metadata = {"author": entry["author"]} doc = Document(page_content=entry["quote"], metadata=metadata) docs.append(doc) inserted_ids = vstore.add_documents(docs) print(f"\nInserted {len(inserted_ids)} documents.") texts = ["I think, therefore I am.", "To the things themselves!"] metadatas = [{"author": "descartes"}, {"author": "husserl"}] ids = ["desc_01", "huss_xy"] inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids) print(f"\nInserted {len(inserted_ids_2)} documents.") results = vstore.similarity_search("Our life is what we make of it", k=3) for res in results: print(f"* {res.page_content} [{res.metadata}]") results_filtered = vstore.similarity_search( "Our life is what we make of it", k=3, filter={"author": "plato"}, ) for res in results_filtered: print(f"* {res.page_content} [{res.metadata}]") results = vstore.similarity_search_with_score("Our life is what we make of it", k=3) for res, score in results: print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]") results = vstore.max_marginal_relevance_search( "Our life is what we make of it", k=3, filter={"author": "aristotle"}, ) for res in results: print(f"* {res.page_content} [{res.metadata}]") delete_1 = vstore.delete(inserted_ids[:3]) print(f"all_succeed={delete_1}") # True, all documents deleted delete_2 = vstore.delete(inserted_ids[2:5]) print(f"some_succeeds={delete_2}") # True, though some IDs were gone already get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"') pdf_loader = PyPDFLoader("what-is-philosophy.pdf") splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64) docs_from_pdf = pdf_loader.load_and_split(text_splitter=splitter) print(f"Documents from PDF: {len(docs_from_pdf)}.") inserted_ids_from_pdf = vstore.add_documents(docs_from_pdf) print(f"Inserted {len(inserted_ids_from_pdf)} documents.") retriever = 
vstore.as_retriever(search_kwargs={"k": 3}) philo_template = """ You are a philosopher that draws inspiration from great thinkers of the past to craft well-thought answers to user questions. Use the provided context as the basis for your answers and do not make up new reasoning paths - just mix-and-match what you are given. Your answers must be concise and to the point, and refrain from answering about other topics than philosophy. CONTEXT: {context} QUESTION: {question} YOUR ANSWER:""" philo_prompt = ChatPromptTemplate.from_template(philo_template) llm = ChatOpenAI() chain = ( {"context": retriever, "question": RunnablePassthrough()} | philo_prompt | llm |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckdb') from langchain_community.document_loaders import DuckDBLoader get_ipython().run_cell_magic('file', 'example.csv', 'Team,Payroll\nNationals,81.34\nReds,82.20\n') loader = DuckDBLoader("SELECT * FROM read_csv_auto('example.csv')") data = loader.load() print(data) loader =
DuckDBLoader( "SELECT * FROM read_csv_auto('example.csv')
langchain_community.document_loaders.DuckDBLoader
from langchain.pydantic_v1 import BaseModel, Field from langchain.tools import BaseTool, StructuredTool, tool @tool def search(query: str) -> str: """Look up things online.""" return "LangChain" print(search.name) print(search.description) print(search.args) @tool def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b print(multiply.name) print(multiply.description) print(multiply.args) class SearchInput(BaseModel): query: str = Field(description="should be a search query") @tool("search-tool", args_schema=SearchInput, return_direct=True) def search(query: str) -> str: """Look up things online.""" return "LangChain" print(search.name) print(search.description) print(search.args) print(search.return_direct) from typing import Optional, Type from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) class SearchInput(BaseModel): query: str =
Field(description="should be a search query")
langchain.pydantic_v1.Field
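The row breaks off part-way through the `BaseTool` subclassing example; a minimal sketch of how that pattern usually finishes (tool name and return value mirror the decorator examples above):
class CustomSearchTool(BaseTool):
    name = "custom_search"
    description = "useful for when you need to answer questions about current events"
    args_schema: Type[BaseModel] = SearchInput

    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool."""
        return "LangChain"

    async def _arun(
        self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("custom_search does not support async")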
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai.chat_models import ChatOpenAI model = ChatOpenAI() prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at {ability}. Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ] ) runnable = prompt | model from langchain_community.chat_message_histories import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory store = {} def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) with_message_history.invoke( {"ability": "math", "input": "What does cosine mean?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "def234"}}, ) from langchain_core.runnables import ConfigurableFieldSpec store = {} def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory: if (user_id, conversation_id) not in store: store[(user_id, conversation_id)] = ChatMessageHistory() return store[(user_id, conversation_id)] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", history_factory_config=[ ConfigurableFieldSpec( id="user_id", annotation=str, name="User ID", description="Unique identifier for the user.", default="", is_shared=True, ), ConfigurableFieldSpec( id="conversation_id", annotation=str, name="Conversation ID", description="Unique identifier for the conversation.", default="", is_shared=True, ), ], ) with_message_history.invoke( {"ability": "math", "input": "Hello"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}}, ) from langchain_core.messages import HumanMessage from langchain_core.runnables import RunnableParallel chain = RunnableParallel({"output_message":
ChatOpenAI()
langchain_openai.chat_models.ChatOpenAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet bibtexparser pymupdf') from langchain_community.document_loaders import BibtexLoader import urllib.request urllib.request.urlretrieve( "https://www.fourmilab.ch/etexts/einstein/specrel/specrel.pdf", "einstein1905.pdf" ) bibtex_text = """ @article{einstein1915, title={Die Feldgleichungen der Gravitation}, abstract={Die Grundgleichungen der Gravitation, die ich hier entwickeln werde, wurden von mir in einer Abhandlung: ,,Die formale Grundlage der allgemeinen Relativit{\"a}tstheorie`` in den Sitzungsberichten der Preu{\ss}ischen Akademie der Wissenschaften 1915 ver{\"o}ffentlicht.}, author={Einstein, Albert}, journal={Sitzungsberichte der K{\"o}niglich Preu{\ss}ischen Akademie der Wissenschaften}, volume={1915}, number={1}, pages={844--847}, year={1915}, doi={10.1002/andp.19163540702}, link={https://onlinelibrary.wiley.com/doi/abs/10.1002/andp.19163540702}, file={einstein1905.pdf} } """ with open("./biblio.bib", "w") as file: file.write(bibtex_text) docs =
BibtexLoader("./biblio.bib")
langchain_community.document_loaders.BibtexLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.model_laboratory import ModelLaboratory from langchain.prompts import PromptTemplate from langchain_community.llms import Cohere, HuggingFaceHub from langchain_openai import OpenAI import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:") llms = [ OpenAI(temperature=0),
Cohere(temperature=0)
langchain_community.llms.Cohere
get_ipython().run_line_magic('pip', 'install --upgrade --quiet infinopy') get_ipython().run_line_magic('pip', 'install --upgrade --quiet matplotlib') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') import datetime as dt import json import time import matplotlib.dates as md import matplotlib.pyplot as plt from infinopy import InfinoClient from langchain.callbacks import InfinoCallbackHandler from langchain_openai import OpenAI get_ipython().system('docker run --rm --detach --name infino-example -p 3000:3000 infinohq/infino:latest') client = InfinoClient() data = """In what country is Normandy located? When were the Normans in Normandy? From which countries did the Norse originate? Who was the Norse leader? What century did the Normans first gain their separate identity? Who gave their name to Normandy in the 1000's and 1100's What is France a region of? Who did King Charles III swear fealty to? When did the Frankish identity emerge? Who was the duke in the battle of Hastings? Who ruled the duchy of Normandy What religion were the Normans What type of major impact did the Norman dynasty have on modern Europe? Who was famed for their Christian spirit? Who assimilted the Roman language? Who ruled the country of Normandy? What principality did William the conquerer found? What is the original meaning of the word Norman? When was the Latin version of the word Norman first recorded? What name comes from the English words Normans/Normanz?""" questions = data.split("\n") handler = InfinoCallbackHandler( model_id="test_openai", model_version="0.1", verbose=False ) llm = OpenAI(temperature=0.1) num_questions = 10 questions = questions[0:num_questions] for question in questions: print(question) llm_result = llm.generate([question], callbacks=[handler]) print(llm_result) def plot(data, title): data = json.loads(data) timestamps = [item["time"] for item in data] dates = [dt.datetime.fromtimestamp(ts) for ts in timestamps] y = [item["value"] for item in data] plt.rcParams["figure.figsize"] = [6, 4] plt.subplots_adjust(bottom=0.2) plt.xticks(rotation=25) ax = plt.gca() xfmt = md.DateFormatter("%Y-%m-%d %H:%M:%S") ax.xaxis.set_major_formatter(xfmt) plt.plot(dates, y) plt.xlabel("Time") plt.ylabel("Value") plt.title(title) plt.show() response = client.search_ts("__name__", "latency", 0, int(time.time())) plot(response.text, "Latency") response = client.search_ts("__name__", "error", 0, int(time.time())) plot(response.text, "Errors") response = client.search_ts("__name__", "prompt_tokens", 0, int(time.time())) plot(response.text, "Prompt Tokens") response = client.search_ts("__name__", "completion_tokens", 0, int(time.time())) plot(response.text, "Completion Tokens") response = client.search_ts("__name__", "total_tokens", 0, int(time.time())) plot(response.text, "Total Tokens") query = "normandy" response = client.search_log(query, 0, int(time.time())) print("Results for", query, ":", response.text) print("===") query = "king charles III" response = client.search_log("king charles III", 0, int(time.time())) print("Results for", query, ":", response.text) from langchain.chains.summarize import load_summarize_chain from langchain_community.document_loaders import WebBaseLoader from langchain_openai import ChatOpenAI handler = InfinoCallbackHandler( model_id="test_chatopenai", model_version="0.1", verbose=False ) urls = [ "https://lilianweng.github.io/posts/2023-06-23-agent/", 
"https://medium.com/lyft-engineering/lyftlearn-ml-model-training-infrastructure-built-on-kubernetes-aef8218842bb", "https://blog.langchain.dev/week-of-10-2-langchain-release-notes/", ] for url in urls: loader = WebBaseLoader(url) docs = loader.load() llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k", callbacks=[handler]) chain =
load_summarize_chain(llm, chain_type="stuff", verbose=False)
langchain.chains.summarize.load_summarize_chain
from langchain import hub from langchain.agents import AgentExecutor, create_react_agent from langchain_community.tools import WikipediaQueryRun from langchain_community.utilities import WikipediaAPIWrapper from langchain_openai import ChatOpenAI api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100) tool = WikipediaQueryRun(api_wrapper=api_wrapper) tools = [tool] prompt = hub.pull("hwchase17/react") llm = ChatOpenAI(temperature=0) agent =
create_react_agent(llm, tools, prompt)
langchain.agents.create_react_agent
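Completing this row's imports, the agent would typically be wrapped in an executor and invoked (the question is illustrative):
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What is Wikipedia?"})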
import requests def download_drive_file(url: str, output_path: str = "chat.db") -> None: file_id = url.split("/")[-2] download_url = f"https://drive.google.com/uc?export=download&id={file_id}" response = requests.get(download_url) if response.status_code != 200: print("Failed to download the file.") return with open(output_path, "wb") as file: file.write(response.content) print(f"File {output_path} downloaded.") url = ( "https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing" ) download_drive_file(url) from langchain_community.chat_loaders.imessage import IMessageChatLoader loader = IMessageChatLoader( path="./chat.db", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages =
merge_chat_runs(raw_messages)
langchain_community.chat_loaders.utils.merge_chat_runs
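After merging consecutive messages from the same sender, the loader pattern maps one participant's messages to AI messages; a sketch (the sender handle is a placeholder for whoever should play the assistant):
chat_sessions: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="Tortoise")  # placeholder sender handle
)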
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa') from vespa.package import ApplicationPackage, Field, RankProfile app_package = ApplicationPackage(name="testapp") app_package.schema.add_fields( Field( name="text", type="string", indexing=["index", "summary"], index="enable-bm25" ), Field( name="embedding", type="tensor<float>(x[384])", indexing=["attribute", "summary"], attribute=["distance-metric: angular"], ), ) app_package.schema.add_rank_profile( RankProfile( name="default", first_phase="closeness(field, embedding)", inputs=[("query(query_embedding)", "tensor<float>(x[384])")], ) ) from vespa.deployment import VespaDocker vespa_docker = VespaDocker() vespa_app = vespa_docker.deploy(application_package=app_package) from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) from langchain_community.embeddings.sentence_transformer import ( SentenceTransformerEmbeddings, ) embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") vespa_config = dict( page_content_field="text", embedding_field="embedding", input_field="query_embedding", ) from langchain_community.vectorstores import VespaStore db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) query = "What did the president say about Ketanji Brown Jackson" results = db.similarity_search(query) print(results[0].page_content) query = "What did the president say about Ketanji Brown Jackson" results = db.similarity_search(query) result = results[0] result.page_content = "UPDATED: " + result.page_content db.add_texts([result.page_content], [result.metadata], result.metadata["id"]) results = db.similarity_search(query) print(results[0].page_content) result = db.similarity_search(query) db.delete(["32"]) result = db.similarity_search(query) results = db.similarity_search_with_score(query) result = results[0] db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config) retriever = db.as_retriever() query = "What did the president say about Ketanji Brown Jackson" results = retriever.get_relevant_documents(query) app_package.schema.add_fields( Field(name="date", type="string", indexing=["attribute", "summary"]), Field(name="rating", type="int", indexing=["attribute", "summary"]), Field(name="author", type="string", indexing=["attribute", "summary"]), ) vespa_app = vespa_docker.deploy(application_package=app_package) for i, doc in enumerate(docs): doc.metadata["date"] = f"2023-{(i % 12)+1}-{(i % 28)+1}" doc.metadata["rating"] = range(1, 6)[i % 5] doc.metadata["author"] = ["Joe Biden", "Unknown"][min(i, 1)] vespa_config.update(dict(metadata_fields=["date", "rating", "author"])) db =
VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
langchain_community.vectorstores.VespaStore.from_documents
from langchain.agents import Tool from langchain_community.tools.file_management.read import ReadFileTool from langchain_community.tools.file_management.write import WriteFileTool from langchain_community.utilities import SerpAPIWrapper search =
SerpAPIWrapper()
langchain_community.utilities.SerpAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer') import promptlayer # Don't forget this 🍰 from langchain.callbacks import PromptLayerCallbackHandler from langchain.schema import ( HumanMessage, ) from langchain_openai import ChatOpenAI chat_llm = ChatOpenAI( temperature=0, callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])], ) llm_results = chat_llm( [
HumanMessage(content="What comes after 1,2,3 ?")
langchain.schema.HumanMessage
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyvespa') from vespa.package import ApplicationPackage, Field, RankProfile app_package = ApplicationPackage(name="testapp") app_package.schema.add_fields( Field( name="text", type="string", indexing=["index", "summary"], index="enable-bm25" ), Field( name="embedding", type="tensor<float>(x[384])", indexing=["attribute", "summary"], attribute=["distance-metric: angular"], ), ) app_package.schema.add_rank_profile( RankProfile( name="default", first_phase="closeness(field, embedding)", inputs=[("query(query_embedding)", "tensor<float>(x[384])")], ) ) from vespa.deployment import VespaDocker vespa_docker = VespaDocker() vespa_app = vespa_docker.deploy(application_package=app_package) from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) from langchain_community.embeddings.sentence_transformer import ( SentenceTransformerEmbeddings, ) embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") vespa_config = dict( page_content_field="text", embedding_field="embedding", input_field="query_embedding", ) from langchain_community.vectorstores import VespaStore db =
VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)
langchain_community.vectorstores.VespaStore.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet doctran')
import json

from langchain_community.document_transformers import DoctranPropertyExtractor
from langchain_core.documents import Document
from dotenv import load_dotenv

load_dotenv()
sample_text = """[Generated with ChatGPT]

Confidential Document - For Internal Use Only

Date: July 1, 2023

Subject: Updates and Discussions on Various Topics

Dear Team,

I hope this email finds you well. In this document, I would like to provide you with some important updates and discuss various topics that require our attention. Please treat the information contained herein as highly confidential.

Security and Privacy Measures
As part of our ongoing commitment to ensure the security and privacy of our customers' data, we have implemented robust measures across all our systems. We would like to commend John Doe (email: john.doe@example.com) from the IT department for his diligent work in enhancing our network security. Moving forward, we kindly remind everyone to strictly adhere to our data protection policies and guidelines. Additionally, if you come across any potential security risks or incidents, please report them immediately to our dedicated team at security@example.com.

HR Updates and Employee Benefits
Recently, we welcomed several new team members who have made significant contributions to their respective departments. I would like to recognize Jane Smith (SSN: 049-45-5928) for her outstanding performance in customer service. Jane has consistently received positive feedback from our clients. Furthermore, please remember that the open enrollment period for our employee benefits program is fast approaching. Should you have any questions or require assistance, please contact our HR representative, Michael Johnson (phone: 418-492-3850, email: michael.johnson@example.com).

Marketing Initiatives and Campaigns
Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.

Research and Development Projects
In our pursuit of innovation, our research and development department has been working tirelessly on various projects. I would like to acknowledge the exceptional work of David Rodriguez (email: david.rodriguez@example.com) in his role as project lead. David's contributions to the development of our cutting-edge technology have been instrumental. Furthermore, we would like to remind everyone to share their ideas and suggestions for potential new projects during our monthly R&D brainstorming session, scheduled for July 10th.

Please treat the information in this document with utmost confidentiality and ensure that it is not shared with unauthorized individuals. If you have any questions or concerns regarding the topics discussed, please do not hesitate to reach out to me directly.

Thank you for your attention, and let's continue to work together to achieve our goals.

Best regards,

Jason Fan
Cofounder & CEO
Psychic
jason@psychic.dev
"""
print(sample_text)
documents = [Document(page_content=sample_text)]
properties = [
    {
        "name": "category",
        "description": "What type of email this is.",
        "type": "string",
        "enum": ["update", "action_item", "customer_feedback", "announcement", "other"],
        "required": True,
    },
    {
        "name": "mentions",
        "description": "A list of all people mentioned in this email.",
        "type": "array",
        "items": {
            "name": "full_name",
            "description": "The full name of the person mentioned.",
            "type": "string",
        },
        "required": True,
    },
    {
        "name": "eli5",
        "description": "Explain this email to me like I'm 5 years old.",
        "type": "string",
        "required": True,
    },
]
property_extractor =
DoctranPropertyExtractor(properties=properties)
langchain_community.document_transformers.DoctranPropertyExtractor
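A hedged usage sketch: the extractor exposes an async transform, and the `properties=properties` keyword on the call is assumed from the documented pattern rather than confirmed by this row.

extracted_document = await property_extractor.atransform_documents(
    documents, properties=properties
)
print(json.dumps(extracted_document[0].metadata, indent=2))  # extracted category/mentions/eli5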
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
conversation = ConversationChain(
    llm=llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate

template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory(ai_prefix="AI Assistant"),
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate

template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

Current conversation:
{history}
Friend: {input}
AI:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=
ConversationBufferMemory(human_prefix="Friend")
langchain.memory.ConversationBufferMemory
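For completeness, a short sketch of exercising the customized chain: with `human_prefix="Friend"`, the buffer renders past turns as `Friend:` lines so they match the prompt template above.

conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")  # history is injected as "Friend: ..." turns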
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas')
ORG_ID = "..."
import getpass
import os

from langchain.chains import RetrievalQA
from langchain.vectorstores.deeplake import DeepLake
from langchain_openai import OpenAIChat, OpenAIEmbeddings

os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
    "Enter your ActiveLoop API token: "
)  # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
openai_embeddings = OpenAIEmbeddings()
db = DeepLake(
    dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory",  # org_id stands for your username or organization from activeloop
    embedding=openai_embeddings,
    runtime={"tensor_db": True},
    token=token,
    read_only=False,
)
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def get_all_links(url):
    response = requests.get(url)
    if response.status_code != 200:
        print(f"Failed to retrieve the page: {url}")
        return []
    soup = BeautifulSoup(response.content, "html.parser")
    links = [
        urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"]
    ]
    return links


base_url = "https://docs.deeplake.ai/en/latest/"
all_links = get_all_links(base_url)
from langchain.document_loaders import AsyncHtmlLoader

loader = AsyncHtmlLoader(all_links)
docs = loader.load()
from langchain.document_transformers import Html2TextTransformer

html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
from langchain_text_splitters import RecursiveCharacterTextSplitter

chunk_size = 4096
docs_new = []
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=chunk_size,
)
for doc in docs_transformed:
    if len(doc.page_content) < chunk_size:
        docs_new.append(doc)
    else:
        docs = text_splitter.create_documents([doc.page_content])
        docs_new.extend(docs)
docs = db.add_documents(docs_new)
from typing import List

from langchain.chains.openai_functions import (
    create_structured_output_chain,
)
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"]
ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"]
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)


class Questions(BaseModel):
    """A question generated from the provided text."""

    question: str = Field(..., description="Questions about text")


prompt_msgs = [
    SystemMessage(
        content="You are a world class expert for generating questions based on provided context. \
        You make sure the question can be answered by the text."
    ),
    HumanMessagePromptTemplate.from_template(
        "Use the given text to generate a question from the following input: {input}"
    ),
    HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt =
ChatPromptTemplate(messages=prompt_msgs)
langchain_core.prompts.ChatPromptTemplate
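A sketch of putting the assembled prompt to work, assuming the structured-output chain from the imports above; the sample `text` is a stand-in, not data from the original notebook.

chain = create_structured_output_chain(Questions, llm, prompt, verbose=False)
text = "Deep Lake is a vector database that supports deep memory."  # illustrative input
generated = chain.run(input=text)  # returns a Questions instance
print(generated.question)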
get_ipython().system(' pip install langchain replicate')
from langchain_community.chat_models import ChatOllama

llama2_chat = ChatOllama(model="llama2:13b-chat")
llama2_code = ChatOllama(model="codellama:7b-instruct")
from langchain_community.llms import Replicate

replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llama2_chat_replicate = Replicate(
    model=replicate_id, input={"temperature": 0.01, "max_length": 500, "top_p": 1}
)
llm = llama2_chat
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///nba_roster.db", sample_rows_in_table_info=0)


def get_schema(_):
    return db.get_table_info()


def run_query(query):
    return db.run(query)


from langchain_core.prompts import ChatPromptTemplate

template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}

Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Given an input question, convert it to a SQL query. No pre-amble."),
        ("human", template),
    ]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

sql_response = (
    RunnablePassthrough.assign(schema=get_schema)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)
sql_response.invoke({"question": "What team is Klay Thompson on?"})
template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""
prompt_response = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Given an input question and SQL response, convert it to a natural language answer. No pre-amble.",
        ),
        ("human", template),
    ]
)
full_chain = (
    RunnablePassthrough.assign(query=sql_response)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
full_chain.invoke({"question": "How many unique teams are there?"})
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

template = """Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
"""
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", template),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
memory = ConversationBufferMemory(return_messages=True)
from langchain_core.runnables import RunnableLambda

sql_chain = (
    RunnablePassthrough.assign(
        schema=get_schema,
        history=RunnableLambda(lambda x: memory.load_memory_variables(x)["history"]),
    )
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)


def save(input_output):
    output = {"output": input_output.pop("output")}
    memory.save_context(input_output, output)
    return output["output"]


sql_response_memory = RunnablePassthrough.assign(output=sql_chain) | save
sql_response_memory.invoke({"question": "What team is Klay Thompson on?"})
template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""
prompt_response = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "Given an input question and SQL response, convert it to a natural language answer. No pre-amble.",
        ),
        ("human", template),
    ]
)
full_chain = (
RunnablePassthrough.assign(query=sql_response_memory)
langchain_core.runnables.RunnablePassthrough.assign
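The row's completion supplies only the first stage of `full_chain`; the rest of the pipeline, mirroring the memory-free chain defined earlier in the same snippet, would plausibly read:

full_chain = (
    RunnablePassthrough.assign(query=sql_response_memory)
    | RunnablePassthrough.assign(
        schema=get_schema,
        response=lambda x: db.run(x["query"]),
    )
    | prompt_response
    | llm
)
full_chain.invoke({"question": "What team is Klay Thompson on?"})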
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pygithub')
import os

from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit
from langchain_community.utilities.github import GitHubAPIWrapper
from langchain_openai import ChatOpenAI

os.environ["GITHUB_APP_ID"] = "123456"
os.environ["GITHUB_APP_PRIVATE_KEY"] = "path/to/your/private-key.pem"
os.environ["GITHUB_REPOSITORY"] = "username/repo-name"
os.environ["GITHUB_BRANCH"] = "bot-branch-name"
os.environ["GITHUB_BASE_BRANCH"] = "main"
os.environ["OPENAI_API_KEY"] = ""
llm = ChatOpenAI(temperature=0, model="gpt-4-1106-preview")
github = GitHubAPIWrapper()
toolkit = GitHubToolkit.from_github_api_wrapper(github)
tools = toolkit.get_tools()
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
print("Available tools:")
for tool in tools:
    print("\t" + tool.name)
agent.run(
    "You have the software engineering capabilities of a Google Principal engineer. You are tasked with completing issues on a github repository. Please look at the existing issues and complete them."
)
from langchain import hub

gh_issue_prompt_template = hub.pull("kastanday/new-github-issue")
print(gh_issue_prompt_template.template)


def format_issue(issue):
    title = f"Title: {issue.get('title')}."
    opened_by = f"Opened by user: {issue.get('opened_by')}"
    body = f"Body: {issue.get('body')}"
    comments = issue.get("comments")  # often too long
    return "\n".join([title, opened_by, body])


issue = github.get_issue(33)  # task to implement a RNA-seq pipeline (bioinformatics)
final_gh_issue_prompt = gh_issue_prompt_template.format(
    issue_description=format_issue(issue)
)
print(final_gh_issue_prompt)
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from langchain_core.prompts.chat import MessagesPlaceholder

summarizer_llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")  # type: ignore
chat_history = MessagesPlaceholder(variable_name="chat_history")
memory = ConversationSummaryBufferMemory(
    memory_key="chat_history",
    return_messages=True,
    llm=summarizer_llm,
    max_token_limit=2_000,
)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,  # or pass a function that accepts the error and returns a string
    max_iterations=30,
    max_execution_time=None,
    early_stopping_method="generate",
    memory=memory,
    agent_kwargs={
        "memory_prompts": [chat_history],
        "input_variables": ["input", "agent_scratchpad", "chat_history"],
        "prefix": final_gh_issue_prompt,
    },
)
from langchain_core.tracers.context import tracing_v2_enabled

os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "ls__......"
os.environ["LANGCHAIN_PROJECT"] = "Github_Demo_PR"
os.environ["LANGCHAIN_WANDB_TRACING"] = "false"
with
tracing_v2_enabled(project_name="Github_Demo_PR", tags=["PR_bot"])
langchain_core.tracers.context.tracing_v2_enabled
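The `with` statement above is cut off at the row boundary; a plausible body, assuming the agent and `final_gh_issue_prompt` defined earlier in the snippet:

with tracing_v2_enabled(project_name="Github_Demo_PR", tags=["PR_bot"]) as cb:
    agent.run(final_gh_issue_prompt)  # the run is traced to the LangSmith project set above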
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lxml')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet html2text')
from langchain_community.document_loaders import EverNoteLoader

loader = EverNoteLoader("example_data/testing.enex")
loader.load()
loader =
EverNoteLoader("example_data/testing.enex", load_single_document=False)
langchain_community.document_loaders.EverNoteLoader
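Usage note, as a small sketch: with `load_single_document=False` the loader is expected to emit one `Document` per note rather than concatenating the whole export into a single document.

docs = loader.load()  # one Document per <note> in the ENEX export
print(len(docs), docs[0].metadata)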
get_ipython().run_line_magic('pip', 'install --upgrade --quiet opaqueprompts langchain')
import os

os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug, set_verbose
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpaquePrompts
from langchain_openai import OpenAI

set_debug(True)
set_verbose(True)
langchain.globals.set_verbose
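A hedged sketch of wiring `OpaquePrompts` into a chain, following the documented wrapper pattern; the prompt text is illustrative, not from the original row.

chain = LLMChain(
    prompt=PromptTemplate.from_template("Answer the question: {question}"),
    llm=OpaquePrompts(base_llm=OpenAI()),  # sanitizes inputs before they reach OpenAI
    memory=ConversationBufferWindowMemory(k=2),
    callbacks=[StdOutCallbackHandler()],
)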
meals = [
    "Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
    "Chicken Flatbreads with red sauce. Italian-Mexican fusion",
    "Veggie sweet potato quesadillas with vegan cheese",
    "One-Pan Tortellini bake with peppers and onions",
]
from langchain_openai import OpenAI

llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate

PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".

Embed the meal into the given text: "{text_to_personalize}".

Prepend a personalized message including the user's name "{user}"
    and their preference "{preference}".

Make it sound good.
"""
PROMPT = PromptTemplate(
    input_variables=["meal", "text_to_personalize", "user", "preference"],
    template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain

chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
    meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the week's specialty dish, our master chefs \
    believe you will love it!",
)
print(response["response"])
for _ in range(5):
    try:
        response = chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
        )
    except Exception as e:
        print(e)
    print(response["response"])
    print()
scoring_criteria_template = (
    "Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
    llm=llm,
    prompt=PROMPT,
    selection_scorer=rl_chain.AutoSelectionScorer(
        llm=llm, scoring_criteria_template_str=scoring_criteria_template
    ),
)
response = chain.run(
    meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
    f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)


class CustomSelectionScorer(rl_chain.SelectionScorer):
    def score_response(
        self, inputs, llm_response: str, event: rl_chain.PickBestEvent
    ) -> float:
        print(event.based_on)
        print(event.to_select_from)
        selected_meal = event.to_select_from["meal"][event.selected.index]
        print(f"selected meal: {selected_meal}")
        if "Tom" in event.based_on["user"]:
            if "Vegetarian" in event.based_on["preference"]:
                if "Chicken" in selected_meal or "Beef" in selected_meal:
                    return 0.0
                else:
                    return 1.0
            else:
                if "Chicken" in selected_meal or "Beef" in selected_meal:
                    return 1.0
                else:
                    return 0.0
        else:
            raise NotImplementedError("I don't know how to score this user")


chain = rl_chain.PickBest.from_llm(
    llm=llm,
    prompt=PROMPT,
    selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
    meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
)


class CustomSelectionScorer(rl_chain.SelectionScorer):
    def score_preference(self, preference, selected_meal):
        if "Vegetarian" in preference:
            if "Chicken" in selected_meal or "Beef" in selected_meal:
                return 0.0
            else:
                return 1.0
        else:
            if "Chicken" in selected_meal or "Beef" in selected_meal:
                return 1.0
            else:
                return 0.0

    def score_response(
        self, inputs, llm_response: str, event: rl_chain.PickBestEvent
    ) -> float:
        selected_meal = event.to_select_from["meal"][event.selected.index]
        if "Tom" in event.based_on["user"]:
            return self.score_preference(event.based_on["preference"], selected_meal)
        elif "Anna" in event.based_on["user"]:
            return self.score_preference(event.based_on["preference"], selected_meal)
        else:
            raise NotImplementedError("I don't know how to score this user")


chain = rl_chain.PickBest.from_llm(
    llm=llm,
    prompt=PROMPT,
    selection_scorer=CustomSelectionScorer(),
    metrics_step=5,
    metrics_window_size=5,  # rolling window average
)
random_chain = rl_chain.PickBest.from_llm(
    llm=llm,
    prompt=PROMPT,
    selection_scorer=CustomSelectionScorer(),
    metrics_step=5,
    metrics_window_size=5,  # rolling window average
    policy=rl_chain.PickBestRandomPolicy,  # set the random policy instead of default
)
for _ in range(20):
    try:
        chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
        )
        random_chain.run(
            meal=
rl_chain.ToSelectFrom(meals)
langchain_experimental.rl_chain.ToSelectFrom
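The loop above is truncated at the row boundary; a sketch of how it would plausibly conclude, with the `metrics.to_pandas()` accessor assumed from the `metrics_step`/`metrics_window_size` options set earlier.

        random_chain.run(
            meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Tom"),
            preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
            text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
        )
    except Exception as e:
        print(e)

print(chain.metrics.to_pandas()["score"].mean())         # learned policy
print(random_chain.metrics.to_pandas()["score"].mean())  # random baseline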
from langchain.agents import Tool
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from pydantic import BaseModel, Field


class DocumentInput(BaseModel):
    question: str = Field()


llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
tools = []
files = [
    {
        "name": "alphabet-earnings",
        "path": "/Users/harrisonchase/Downloads/2023Q1_alphabet_earnings_release.pdf",
    },
    {
        "name": "tesla-earnings",
        "path": "/Users/harrisonchase/Downloads/TSLA-Q1-2023-Update.pdf",
    },
]
for file in files:
    loader = PyPDFLoader(file["path"])
    pages = loader.load_and_split()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    docs = text_splitter.split_documents(pages)
    embeddings = OpenAIEmbeddings()
    retriever = FAISS.from_documents(docs, embeddings).as_retriever()
    tools.append(
        Tool(
            args_schema=DocumentInput,
            name=file["name"],
            description=f"useful when you want to answer questions about {file['name']}",
            func=
RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
langchain.chains.RetrievalQA.from_chain_type
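A sketch of finishing the setup: each `RetrievalQA` chain becomes a tool, and an OpenAI-functions agent routes questions between them. The agent type follows the standard toolkit pattern and the question is illustrative.

from langchain.agents import AgentType, initialize_agent

agent = initialize_agent(
    agent=AgentType.OPENAI_FUNCTIONS,
    tools=tools,
    llm=llm,
    verbose=True,
)
agent({"input": "did alphabet or tesla have more revenue?"})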
import os
from getpass import getpass

os.environ["OPENAI_API_KEY"] = getpass()
activeloop_token = getpass("Activeloop Token:")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
get_ipython().system('ls "../../../../../../libs"')
from langchain_community.document_loaders import TextLoader

root_dir = "../../../../../../libs"
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
    for file in filenames:
        if file.endswith(".py") and "*venv/" not in dirpath:
            try:
                loader = TextLoader(os.path.join(dirpath, file), encoding="utf-8")
                docs.extend(loader.load_and_split())
            except Exception:
                pass
print(f"{len(docs)}")
from langchain_text_splitters import CharacterTextSplitter

text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(docs)
print(f"{len(texts)}")
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
embeddings
from langchain_community.vectorstores import DeepLake

username = "<USERNAME_OR_ORG>"
db = DeepLake.from_documents(
    texts, embeddings, dataset_path=f"hub://{username}/langchain-code", overwrite=True
)
db
db = DeepLake(
    dataset_path=f"hub://{username}/langchain-code",
    read_only=True,
    embedding=embeddings,
)
retriever = db.as_retriever()
retriever.search_kwargs["distance_metric"] = "cos"
retriever.search_kwargs["fetch_k"] = 20
retriever.search_kwargs["maximal_marginal_relevance"] = True
retriever.search_kwargs["k"] = 20


def filter(x):
    if "something" in x["text"].data()["value"]:
        return False
    metadata = x["metadata"].data()["value"]
    return "only_this" in metadata["source"] or "also_that" in metadata["source"]


from langchain.chains import ConversationalRetrievalChain
from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    model_name="gpt-3.5-turbo-0613"
)  # 'ada' 'gpt-3.5-turbo-0613' 'gpt-4',
qa =
ConversationalRetrievalChain.from_llm(model, retriever=retriever)
langchain.chains.ConversationalRetrievalChain.from_llm
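A short sketch of querying the finished chain: `ConversationalRetrievalChain` takes a question plus the accumulated chat history. The question shown is illustrative.

chat_history = []
question = "What classes are derived from the Chain class?"  # illustrative
result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print(result["answer"])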
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain -q')
etherscanAPIKey = "..."
import os

from langchain_community.document_loaders import EtherscanLoader

os.environ["ETHERSCAN_API_KEY"] = etherscanAPIKey
account_address = "0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b"
loader =
EtherscanLoader(account_address, filter="erc20_transaction")
langchain_community.document_loaders.EtherscanLoader
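Usage sketch: with `filter="erc20_transaction"`, each loaded `Document` is expected to hold one ERC-20 transfer for the account.

result = loader.load()
print(len(result))
print(result[0].page_content)  # a single ERC-20 transaction record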
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_community.chat_models import JinaChat
from langchain_core.messages import HumanMessage, SystemMessage

chat =
JinaChat(temperature=0)
langchain_community.chat_models.JinaChat
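A minimal sketch of calling the chat model with the message classes already imported above; the translation prompt is illustrative.

messages = [
    SystemMessage(
        content="You are a helpful assistant that translates English to French."
    ),
    HumanMessage(
        content="Translate this sentence from English to French: I love programming."
    ),
]
chat(messages)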
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os

os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki =
WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
langchain_community.retrievers.WikipediaRetriever
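A quick sketch of exercising the retriever on its own before wiring it into a chain; the metadata key shown is documented for `WikipediaRetriever`, but treat it as an assumption.

docs = wiki.get_relevant_documents("How fast are cheetahs?")
print(docs[0].metadata["title"])  # title of the best-matching article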
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai')
import os

os.environ["LABEL_STUDIO_URL"] = "<YOUR-LABEL-STUDIO-URL>"  # e.g. http://localhost:8080
os.environ["LABEL_STUDIO_API_KEY"] = "<YOUR-LABEL-STUDIO-API-KEY>"
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_openai import OpenAI

llm = OpenAI(
    temperature=0, callbacks=[LabelStudioCallbackHandler(project_name="My Project")]
)
print(llm("Tell me a joke"))
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

chat_llm = ChatOpenAI(
    callbacks=[
        LabelStudioCallbackHandler(
            mode="chat",
            project_name="New Project with Chat",
        )
    ]
)
llm_results = chat_llm(
    [
SystemMessage(content="Always use a lot of emojis")
langchain_core.messages.SystemMessage
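The message list above is truncated at the row boundary; a plausible completion, with the human turn being illustrative rather than taken from the row:

llm_results = chat_llm(
    [
        SystemMessage(content="Always use a lot of emojis"),
        HumanMessage(content="Tell me a joke"),
    ]
)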
from langchain.prompts import ChatMessagePromptTemplate

prompt = "May the {subject} be with you"
chat_message_prompt = ChatMessagePromptTemplate.from_template(
    role="Jedi", template=prompt
)
chat_message_prompt.format(subject="force")
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)

human_prompt = "Summarize our conversation so far in {word_count} words."
human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)
chat_prompt = ChatPromptTemplate.from_messages(
    [MessagesPlaceholder(variable_name="conversation"), human_message_template]
)
from langchain_core.messages import AIMessage, HumanMessage

human_message = HumanMessage(content="What is the best way to learn programming?")
ai_message =
AIMessage(
    content="""\
1. Choose a programming language: Decide on a programming language that you want to learn.

2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.

3. Practice, practice, practice: The best way to learn programming is through hands-on experience\
"""
)
langchain_core.messages.AIMessage
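To close the loop, a sketch of rendering the placeholder-based prompt with the two messages above, following standard `format_prompt` usage:

chat_prompt.format_prompt(
    conversation=[human_message, ai_message], word_count="10"
).to_messages()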