Source code for langchain.agents.agent_toolkits.sql.base

"""SQL agent."""
from typing import Any, List, Optional

from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM


def create_sql_agent(
    llm: BaseLLM,
    toolkit: SQLDatabaseToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = SQL_PREFIX,
    suffix: str = SQL_SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
    top_k: int = 10,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a SQL agent from an LLM and tools."""
    tools = toolkit.get_tools()
    prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        format_instructions=format_instructions,
        input_variables=input_variables,
    )
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        verbose=verbose,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
    )
https://python.langchain.com/en/latest/_modules/langchain/agents/agent_toolkits/sql/base.html
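A minimal usage sketch (not part of the module above), assuming a local SQLite file ``Chinook.db``; depending on the installed version, ``SQLDatabaseToolkit`` may also accept or require an ``llm`` argument.

.. code-block:: python

    from langchain.agents.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
    from langchain.llms import OpenAI
    from langchain.sql_database import SQLDatabase

    db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # placeholder path to a local database
    llm = OpenAI(temperature=0)
    toolkit = SQLDatabaseToolkit(db=db)  # newer versions may also take llm=llm
    agent_executor = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)
    agent_executor.run("How many tables are in the database?")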
Source code for langchain.agents.react.base

"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.react.output_parser import ReActOutputParser
from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT
from langchain.agents.react.wiki_prompt import WIKI_PROMPT
from langchain.agents.tools import Tool
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool


class ReActDocstoreAgent(Agent):
    """Agent for the ReAct chain."""

    output_parser: AgentOutputParser = Field(default_factory=ReActOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return ReActOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.REACT_DOCSTORE

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return WIKI_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 2:
            raise ValueError(f"Exactly two tools must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Lookup", "Search"}:
            raise ValueError(
                f"Tool names should be Lookup and Search, got {tool_names}"
            )

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def _stop(self) -> List[str]:
        return ["\nObservation:"]

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return "Thought:"


class DocstoreExplorer:
    """Class to assist with exploration of a document store."""

    def __init__(self, docstore: Docstore):
        """Initialize with a docstore, and set initial document to None."""
        self.docstore = docstore
        self.document: Optional[Document] = None
        self.lookup_str = ""
        self.lookup_index = 0

    def search(self, term: str) -> str:
        """Search for a term in the docstore, and if found save."""
        result = self.docstore.search(term)
        if isinstance(result, Document):
            self.document = result
            return self._summary
        else:
            self.document = None
            return result

    def lookup(self, term: str) -> str:
        """Lookup a term in document (if saved)."""
        if self.document is None:
            raise ValueError("Cannot lookup without a successful search first")
        if term.lower() != self.lookup_str:
            self.lookup_str = term.lower()
            self.lookup_index = 0
        else:
            self.lookup_index += 1
        lookups = [p for p in self._paragraphs if self.lookup_str in p.lower()]
        if len(lookups) == 0:
            return "No Results"
        elif self.lookup_index >= len(lookups):
            return "No More Results"
        else:
            result_prefix = f"(Result {self.lookup_index + 1}/{len(lookups)})"
            return f"{result_prefix} {lookups[self.lookup_index]}"

    @property
    def _summary(self) -> str:
        return self._paragraphs[0]

    @property
    def _paragraphs(self) -> List[str]:
        if self.document is None:
            raise ValueError("Cannot get paragraphs without a document")
        return self.document.page_content.split("\n\n")


class ReActTextWorldAgent(ReActDocstoreAgent):
    """Agent for the ReAct TextWorld chain."""

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Return default prompt."""
        return TEXTWORLD_PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Play"}:
            raise ValueError(f"Tool name should be Play, got {tool_names}")


class ReActChain(AgentExecutor):
    """Chain that implements the ReAct paper.

    Example:
        .. code-block:: python

            from langchain import ReActChain, OpenAI
            react = ReActChain(llm=OpenAI(), docstore=docstore)
    """

    def __init__(self, llm: BaseLLM, docstore: Docstore, **kwargs: Any):
        """Initialize with the LLM and a docstore."""
        docstore_explorer = DocstoreExplorer(docstore)
        tools = [
            Tool(
                name="Search",
                func=docstore_explorer.search,
                description="Search for a term in the docstore.",
            ),
            Tool(
                name="Lookup",
                func=docstore_explorer.lookup,
                description="Lookup a term in the docstore.",
            ),
        ]
        agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)
        super().__init__(agent=agent, tools=tools, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/react/base.html
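A hedged end-to-end sketch of ``ReActChain`` with the Wikipedia docstore; it assumes the ``wikipedia`` package is installed and an OpenAI key is configured.

.. code-block:: python

    from langchain import OpenAI
    from langchain.agents.react.base import ReActChain
    from langchain.docstore.wikipedia import Wikipedia

    llm = OpenAI(temperature=0)
    react = ReActChain(llm=llm, docstore=Wikipedia(), verbose=True)
    react.run("What profession does Nicholas Ray and Elia Kazan have in common?")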
Source code for langchain.agents.self_ask_with_search.base

"""Chain that does self ask with search."""
from typing import Any, Sequence, Union

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser
from langchain.agents.self_ask_with_search.prompt import PROMPT
from langchain.agents.tools import Tool
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.serpapi import SerpAPIWrapper


class SelfAskWithSearchAgent(Agent):
    """Agent for the self-ask-with-search paper."""

    output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return SelfAskOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.SELF_ASK_WITH_SEARCH

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Prompt does not depend on tools."""
        return PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Intermediate Answer"}:
            raise ValueError(
                f"Tool name should be Intermediate Answer, got {tool_names}"
            )

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Intermediate answer: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return ""


class SelfAskWithSearchChain(AgentExecutor):
    """Chain that does self ask with search.

    Example:
        .. code-block:: python

            from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper
            search_chain = GoogleSerperAPIWrapper()
            self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
    """

    def __init__(
        self,
        llm: BaseLLM,
        search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper],
        **kwargs: Any,
    ):
        """Initialize with just an LLM and a search chain."""
        search_tool = Tool(
            name="Intermediate Answer", func=search_chain.run, description="Search"
        )
        agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool])
        super().__init__(agent=agent, tools=[search_tool], **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/self_ask_with_search/base.html
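A short usage sketch, assuming a SerpAPI key is available in the environment.

.. code-block:: python

    from langchain import OpenAI, SelfAskWithSearchChain
    from langchain.utilities import SerpAPIWrapper

    llm = OpenAI(temperature=0)
    search = SerpAPIWrapper()  # reads SERPAPI_API_KEY from the environment
    self_ask = SelfAskWithSearchChain(llm=llm, search_chain=search, verbose=True)
    self_ask.run("What is the hometown of the reigning men's U.S. Open champion?")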
Source code for langchain.agents.conversational.base

"""An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations

from typing import Any, List, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool


class ConversationalAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    ai_prefix: str = "AI"
    output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser)

    @classmethod
    def _get_default_output_parser(
        cls, ai_prefix: str = "AI", **kwargs: Any
    ) -> AgentOutputParser:
        return ConvoOutputParser(ai_prefix=ai_prefix)

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.CONVERSATIONAL_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            ai_prefix: String to use before AI output.
            human_prefix: String to use before human output.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join(
            [f"> {tool.name}: {tool.description}" for tool in tools]
        )
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(
            tool_names=tool_names, ai_prefix=ai_prefix, human_prefix=human_prefix
        )
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            ai_prefix=ai_prefix,
            human_prefix=human_prefix,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser(
            ai_prefix=ai_prefix
        )
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            ai_prefix=ai_prefix,
            output_parser=_output_parser,
            **kwargs,
        )
https://python.langchain.com/en/latest/_modules/langchain/agents/conversational/base.html
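A hedged sketch wiring ``ConversationalAgent`` into an ``AgentExecutor`` with memory; the ``Echo`` tool is a stand-in for a real tool.

.. code-block:: python

    from langchain import OpenAI
    from langchain.agents import AgentExecutor, Tool
    from langchain.agents.conversational.base import ConversationalAgent
    from langchain.memory import ConversationBufferMemory

    llm = OpenAI(temperature=0)
    tools = [Tool(name="Echo", func=lambda q: q, description="Repeats the input back verbatim.")]
    agent = ConversationalAgent.from_llm_and_tools(llm, tools)
    memory = ConversationBufferMemory(memory_key="chat_history")  # matches the prompt's chat_history variable
    executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, memory=memory, verbose=True)
    executor.run(input="Hi, my name is Alice.")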
Source code for langchain.agents.mrkl.base

"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations

from typing import Any, Callable, List, NamedTuple, Optional, Sequence

from pydantic import Field

from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.tools.base import BaseTool


class ChainConfig(NamedTuple):
    """Configuration for chain to use in MRKL system.

    Args:
        action_name: Name of the action.
        action: Action function to call.
        action_description: Description of the action.
    """

    action_name: str
    action: Callable
    action_description: str


class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""

    output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)

    @classmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        return MRKLOutputParser()

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return AgentType.ZERO_SHOT_REACT_DESCRIPTION

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
    ) -> PromptTemplate:
        """Create prompt in the style of the zero shot agent.

        Args:
            tools: List of tools the agent will have access to, used to format the
                prompt.
            prefix: String to put before the list of tools.
            suffix: String to put after the list of tools.
            input_variables: List of input variables the final prompt will expect.

        Returns:
            A PromptTemplate with the template assembled from the pieces here.
        """
        tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        for tool in tools:
            if tool.description is None:
                raise ValueError(
                    f"Got a tool {tool.name} without a description. For this agent, "
                    f"a description must always be provided."
                )


class MRKLChain(AgentExecutor):
    """Chain that implements the MRKL system.

    Example:
        .. code-block:: python

            from langchain import OpenAI, MRKLChain
            from langchain.chains.mrkl.base import ChainConfig
            llm = OpenAI(temperature=0)
            chains = [...]
            mrkl = MRKLChain.from_chains(llm=llm, chains=chains)
    """

    @classmethod
    def from_chains(
        cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any
    ) -> AgentExecutor:
        """User friendly way to initialize the MRKL chain.

        This is intended to be an easy way to get up and running with the
        MRKL chain.

        Args:
            llm: The LLM to use as the agent LLM.
            chains: The chains the MRKL system has access to.
            **kwargs: parameters to be passed to initialization.

        Returns:
            An initialized MRKL chain.

        Example:
            .. code-block:: python

                from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, MRKLChain
                from langchain.chains.mrkl.base import ChainConfig
                llm = OpenAI(temperature=0)
                search = SerpAPIWrapper()
                llm_math_chain = LLMMathChain(llm=llm)
                chains = [
                    ChainConfig(
                        action_name = "Search",
                        action=search.search,
                        action_description="useful for searching"
                    ),
                    ChainConfig(
                        action_name="Calculator",
                        action=llm_math_chain.run,
                        action_description="useful for doing math"
                    )
                ]
                mrkl = MRKLChain.from_chains(llm, chains)
        """
        tools = [
            Tool(
                name=c.action_name,
                func=c.action,
                description=c.action_description,
            )
            for c in chains
        ]
        agent = ZeroShotAgent.from_llm_and_tools(llm, tools)
        return cls(agent=agent, tools=tools, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/agents/mrkl/base.html
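A hedged sketch of ``MRKLChain.from_chains`` with two tools; it assumes OpenAI and SerpAPI keys are set in the environment.

.. code-block:: python

    from langchain import LLMMathChain, OpenAI, SerpAPIWrapper
    from langchain.agents.mrkl.base import ChainConfig, MRKLChain

    llm = OpenAI(temperature=0)
    search = SerpAPIWrapper()
    llm_math = LLMMathChain(llm=llm)
    chains = [
        ChainConfig(action_name="Search", action=search.run,
                    action_description="useful for questions about current events"),
        ChainConfig(action_name="Calculator", action=llm_math.run,
                    action_description="useful for doing math"),
    ]
    mrkl = MRKLChain.from_chains(llm, chains, verbose=True)
    mrkl.run("What is 24 raised to the 0.5 power?")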
Source code for langchain.embeddings.llamacpp

"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.embeddings.base import Embeddings


class LlamaCppEmbeddings(BaseModel, Embeddings):
    """Wrapper around llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        model_path = values["model_path"]
        n_ctx = values["n_ctx"]
        n_parts = values["n_parts"]
        seed = values["seed"]
        f16_kv = values["f16_kv"]
        logits_all = values["logits_all"]
        vocab_only = values["vocab_only"]
        use_mlock = values["use_mlock"]
        n_threads = values["n_threads"]
        n_batch = values["n_batch"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(
                model_path=model_path,
                n_ctx=n_ctx,
                n_parts=n_parts,
                seed=seed,
                f16_kv=f16_kv,
                logits_all=logits_all,
                vocab_only=vocab_only,
                use_mlock=use_mlock,
                n_threads=n_threads,
                n_batch=n_batch,
                embedding=True,
            )
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception:
            raise NameError(f"Could not load Llama model from path: {model_path}")

        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
https://python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
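A short usage sketch; the model path is a placeholder for a locally downloaded llama.cpp-compatible weights file.

.. code-block:: python

    from langchain.embeddings import LlamaCppEmbeddings

    llama = LlamaCppEmbeddings(model_path="/path/to/ggml-model.bin", n_ctx=1024)
    doc_vectors = llama.embed_documents(["First document.", "Second document."])
    query_vector = llama.embed_query("A question about the documents")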
Source code for langchain.embeddings.huggingface_hub

"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)


class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around HuggingFaceHub embedding models.

    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceHubEmbeddings
            repo_id = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceHubEmbeddings(
                repo_id=repo_id,
                task="feature-extraction",
                huggingfacehub_api_token="my-api-key",
            )
    """

    client: Any  #: :meta private:
    repo_id: str = DEFAULT_REPO_ID
    """Model name to use."""
    task: Optional[str] = "feature-extraction"
    """Task to call the model with."""
    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.inference_api import InferenceApi

            repo_id = values["repo_id"]
            if not repo_id.startswith("sentence-transformers"):
                raise ValueError(
                    "Currently only 'sentence-transformers' embedding models "
                    f"are supported. Got invalid 'repo_id' {repo_id}."
                )
            client = InferenceApi(
                repo_id=repo_id,
                token=huggingfacehub_api_token,
                task=values.get("task"),
            )
            if client.task not in VALID_TASKS:
                raise ValueError(
                    f"Got invalid task {client.task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
            values["client"] = client
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        responses = self.client(inputs=texts, params=_model_kwargs)
        return responses

    def embed_query(self, text: str) -> List[float]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        response = self.embed_documents([text])[0]
        return response
https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
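A short usage sketch, assuming ``HUGGINGFACEHUB_API_TOKEN`` is set in the environment.

.. code-block:: python

    from langchain.embeddings import HuggingFaceHubEmbeddings

    hf = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-mpnet-base-v2")
    doc_vectors = hf.embed_documents(["hello world", "goodbye world"])
    query_vector = hf.embed_query("hello")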
Source code for langchain.embeddings.huggingface

"""Wrapper around HuggingFace embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)


class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings
            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Key word arguments to pass to the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers

            self.client = sentence_transformers.SentenceTransformer(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
        except ImportError:
            raise ValueError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client.encode(texts)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(text)
        return embedding.tolist()


class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceInstructEmbeddings
            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name, model_kwargs=model_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Key word arguments to pass to the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
        except ImportError as e:
            raise ValueError("Dependencies for InstructorEmbedding not found.") from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair])[0]
        return embedding.tolist()
https://python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
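A short local usage sketch (the model is downloaded on first use; the 768-dimension note applies to this particular model).

.. code-block:: python

    from langchain.embeddings import HuggingFaceEmbeddings

    hf = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    doc_vectors = hf.embed_documents(["LangChain wraps several embedding backends.", "A second document."])
    query_vector = hf.embed_query("What does LangChain wrap?")
    print(len(doc_vectors), len(query_vector))  # 2 documents; 768 dimensions for this model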
Source code for langchain.embeddings.aleph_alpha

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """Wrapper for Aleph Alpha's Asymmetric Embeddings.

    AA provides you with an endpoint to embed a document and a query.
    The models were optimized to make the embeddings of documents and
    the query for a document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from langchain.embeddings.aleph_alpha import AlephAlphaAsymmetricSemanticEmbedding

            embeddings = AlephAlphaAsymmetricSemanticEmbedding()

            document = "This is a content of the document"
            query = "What is the content of the document?"
            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""
    hosting: Optional[str] = "https://api.aleph-alpha.com"
    """Optional parameter that specifies which datacenters may process the request."""
    normalize: Optional[bool] = True
    """Should returned embeddings be normalized"""
    compress_to_size: Optional[int] = 128
    """Should the returned embeddings come back as an original 5120-dim vector,
    or should it be compressed to 128-dim."""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
    explicitly been set in the request."""
    control_log_additive: Optional[bool] = True
    """Apply controls on prompt items by adding the log(control_factor)
    to attention scores."""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        aleph_alpha_api_key = get_from_dict_or_env(
            values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
        )
        try:
            from aleph_alpha_client import Client
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        values["client"] = Client(token=aleph_alpha_api_key)
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's asymmetric Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        document_embeddings = []
        for text in texts:
            document_params = {
                "prompt": Prompt.from_text(text),
                "representation": SemanticRepresentation.Document,
                "compress_to_size": self.compress_to_size,
                "normalize": self.normalize,
                "contextual_control_threshold": self.contextual_control_threshold,
                "control_log_additive": self.control_log_additive,
            }
            document_request = SemanticEmbeddingRequest(**document_params)
            document_response = self.client.semantic_embed(
                request=document_request, model=self.model
            )
            document_embeddings.append(document_response.embedding)
        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric, query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        symmetric_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Query,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
        symmetric_response = self.client.semantic_embed(
            request=symmetric_request, model=self.model
        )
        return symmetric_response.embedding


class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
    """The symmetric version of the Aleph Alpha's semantic embeddings.

    The main difference is that here, both the documents and
    queries are embedded with a SemanticRepresentation.Symmetric

    Example:
        .. code-block:: python

            from langchain.embeddings.aleph_alpha import AlephAlphaSymmetricSemanticEmbedding

            embeddings = AlephAlphaSymmetricSemanticEmbedding()

            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
            query_result = embeddings.embed_query(text)
    """

    def _embed(self, text: str) -> List[float]:
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        query_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Symmetric,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        query_request = SemanticEmbeddingRequest(**query_params)
        query_response = self.client.semantic_embed(
            request=query_request, model=self.model
        )
        return query_response.embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        document_embeddings = []
        for text in texts:
            document_embeddings.append(self._embed(text))
        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's symmetric embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
https://python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
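A short usage sketch, assuming ``ALEPH_ALPHA_API_KEY`` is set in the environment.

.. code-block:: python

    from langchain.embeddings.aleph_alpha import AlephAlphaAsymmetricSemanticEmbedding

    embeddings = AlephAlphaAsymmetricSemanticEmbedding(compress_to_size=128)
    doc_result = embeddings.embed_documents(["Nuclear power plants produce low-carbon electricity."])
    query_result = embeddings.embed_query("How clean is nuclear energy?")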
Source code for langchain.embeddings.openai

"""Wrapper around OpenAI embedding models."""
from __future__ import annotations

import logging
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Set,
    Tuple,
    Union,
)

import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        return embeddings.client.create(**kwargs)

    return _embed_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
    """Wrapper around OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and optionally
    OPENAI_API_VERSION.
    The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
    the properties of your endpoint.
    In addition, the deployment name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os
            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name"
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    deployment: str = model  # to support Azure OpenAI Service custom deployment names
    embedding_ctx_length: int = 8191
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Tuple[()]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        openai_organization = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            openai.api_key = openai_api_key
            if openai_organization:
                openai.organization = openai_organization
            values["client"] = openai.Embedding
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    # please refer to
    # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for i in range(len(texts))]
        try:
            import tiktoken

            tokens = []
            indices = []
            encoding = tiktoken.model.encoding_for_model(self.model)
            for i, text in enumerate(texts):
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
                token = encoding.encode(
                    text,
                    allowed_special=self.allowed_special,
                    disallowed_special=self.disallowed_special,
                )
                for j in range(0, len(token), self.embedding_ctx_length):
                    tokens += [token[j : j + self.embedding_ctx_length]]
                    indices += [i]

            batched_embeddings = []
            _chunk_size = chunk_size or self.chunk_size
            for i in range(0, len(tokens), _chunk_size):
                response = embed_with_retry(
                    self,
                    input=tokens[i : i + _chunk_size],
                    engine=self.deployment,
                )
                batched_embeddings += [r["embedding"] for r in response["data"]]

            results: List[List[List[float]]] = [[] for i in range(len(texts))]
            lens: List[List[int]] = [[] for i in range(len(texts))]
            for i in range(len(indices)):
                results[indices[i]].append(batched_embeddings[i])
                lens[indices[i]].append(len(batched_embeddings[i]))

            for i in range(len(texts)):
                average = np.average(results[i], axis=0, weights=lens[i])
                embeddings[i] = (average / np.linalg.norm(average)).tolist()

            return embeddings

        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to use OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint."""
        # handle large input text
        if self.embedding_ctx_length > 0:
            return self._get_len_safe_embeddings([text], engine=engine)[0]
        else:
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
            return embed_with_retry(self, input=[text], engine=engine)["data"][0][
                "embedding"
            ]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk size
                specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # handle batches of large input text
        if self.embedding_ctx_length > 0:
            return self._get_len_safe_embeddings(texts, engine=self.deployment)
        else:
            results = []
            _chunk_size = chunk_size or self.chunk_size
            for i in range(0, len(texts), _chunk_size):
                response = embed_with_retry(
                    self,
                    input=texts[i : i + _chunk_size],
                    engine=self.deployment,
                )
                results += [r["embedding"] for r in response["data"]]
            return results

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding
https://python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
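A short usage sketch, assuming ``OPENAI_API_KEY`` is set in the environment.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
    doc_vectors = embeddings.embed_documents(["LangChain is a framework for LLM applications.", "Embeddings map text to vectors."])
    query_vector = embeddings.embed_query("What is LangChain?")
    print(len(query_vector))  # text-embedding-ada-002 returns 1536-dimensional vectors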
Source code for langchain.embeddings.self_hosted_hugging_face

"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional

from langchain.embeddings.self_hosted import SelfHostedEmbeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)

logger = logging.getLogger(__name__)


def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return client.encode(*args, **kwargs)


def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
    """Load the embedding model."""
    if not instruct:
        import sentence_transformers

        client = sentence_transformers.SentenceTransformer(model_id)
    else:
        from InstructorEmbedding import INSTRUCTOR

        client = INSTRUCTOR(model_id)

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available"
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )

        client = client.to(device)
    return client


class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
    """Runs sentence_transformers embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_name = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
    """

    client: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable = load_embedding_model
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model load function."""
    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)


class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    """Runs InstructorEmbedding embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another cloud
    like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """

    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
    """Requirements to install on hardware to inference the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get(
            "model_id", DEFAULT_INSTRUCT_MODEL
        )
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = []
        for text in texts:
            instruction_pairs.append([self.embed_instruction, text])
        embeddings = self.client(self.pipeline_ref, instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
        return embedding.tolist()
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
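A hedged sketch of sending the default sentence-transformers model to a runhouse-managed GPU cluster; it assumes cloud credentials are configured for runhouse, and the cluster name is a placeholder.

.. code-block:: python

    import runhouse as rh
    from langchain.embeddings import SelfHostedHuggingFaceEmbeddings

    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
    hf = SelfHostedHuggingFaceEmbeddings(hardware=gpu)  # uses the default all-mpnet-base-v2 model
    vectors = hf.embed_documents(["Remote embedding example."])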
Source code for langchain.embeddings.fake

from typing import List

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings


class FakeEmbeddings(Embeddings, BaseModel):
    size: int

    def _get_embedding(self) -> List[float]:
        return list(np.random.normal(size=self.size))

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding() for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
https://python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html
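A short usage sketch; this class is handy in unit tests because it returns random vectors of a fixed size without loading any model.

.. code-block:: python

    from langchain.embeddings.fake import FakeEmbeddings

    fake = FakeEmbeddings(size=1536)
    doc_vectors = fake.embed_documents(["any text at all"])
    query_vector = fake.embed_query("ignored")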
Source code for langchain.embeddings.cohere

"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class CohereEmbeddings(BaseModel, Embeddings):
    """Wrapper around Cohere embedding models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import CohereEmbeddings
            cohere = CohereEmbeddings(model="medium", cohere_api_key="my-api-key")
    """

    client: Any  #: :meta private:
    model: str = "large"
    """Model name to use."""

    truncate: Optional[str] = None
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""

    cohere_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        cohere_api_key = get_from_dict_or_env(
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        try:
            import cohere

            values["client"] = cohere.Client(cohere_api_key)
        except ImportError:
            raise ValueError(
                "Could not import cohere python package. "
                "Please install it with `pip install cohere`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Cohere's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = self.client.embed(
            model=self.model, texts=texts, truncate=self.truncate
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(
            model=self.model, texts=[text], truncate=self.truncate
        ).embeddings[0]
        return list(map(float, embedding))
https://python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
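As a quick illustration of the class above, the sketch below embeds a query and a couple of documents. It requires the cohere package; the model name and API key are placeholders, and the constructor arguments follow the class docstring.

from langchain.embeddings import CohereEmbeddings

embeddings = CohereEmbeddings(model="large", cohere_api_key="my-api-key")  # placeholder key

query_vector = embeddings.embed_query("What is LangChain?")            # one vector
doc_vectors = embeddings.embed_documents(["first doc", "second doc"])   # one vector per doc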
893a04d9778e-0
Source code for langchain.embeddings.self_hosted """Running custom embedding models on self-hosted remote hardware.""" from typing import Any, Callable, List from pydantic import Extra from langchain.embeddings.base import Embeddings from langchain.llms import SelfHostedPipeline def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return pipeline(*args, **kwargs) [docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings): """Runs custom embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example using a model load function: .. code-block:: python from langchain.embeddings import SelfHostedEmbeddings from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") def get_pipeline(): model_id = "facebook/bart-large" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) return pipeline("feature-extraction", model=model, tokenizer=tokenizer) embeddings = SelfHostedEmbeddings( model_load_fn=get_pipeline, hardware=gpu
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
893a04d9778e-1
model_load_fn=get_pipeline, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing in a pipeline path: .. code-block:: python from langchain.embeddings import SelfHostedHFEmbeddings import runhouse as rh import pickle from transformers import pipeline gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") pipeline = pipeline(model="bert-base-uncased", task="feature-extraction") rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(gpu, path="models") embeddings = SelfHostedHFEmbeddings.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ inference_fn: Callable = _embed_documents """Inference function to extract the embeddings on the remote hardware.""" inference_kwargs: Any = None """Any kwargs to pass to the model's inference function.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client(self.pipeline_ref, texts) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings [docs] def embed_query(self, text: str) -> List[float]:
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
893a04d9778e-2
[docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embeddings = self.client(self.pipeline_ref, text) if not isinstance(embeddings, list): return embeddings.tolist() return embeddings By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
84f22e0feb26-0
Source code for langchain.embeddings.tensorflow_hub """Wrapper around TensorflowHub embedding models.""" from typing import Any, List from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" [docs]class TensorflowHubEmbeddings(BaseModel, Embeddings): """Wrapper around tensorflow_hub embedding models. To use, you should have the ``tensorflow_text`` python package installed. Example: .. code-block:: python from langchain.embeddings import TensorflowHubEmbeddings url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" tf = TensorflowHubEmbeddings(model_url=url) """ embed: Any #: :meta private: model_url: str = DEFAULT_MODEL_URL """Model name to use.""" def __init__(self, **kwargs: Any): """Initialize the tensorflow_hub and tensorflow_text.""" super().__init__(**kwargs) try: import tensorflow_hub import tensorflow_text # noqa self.embed = tensorflow_hub.load(self.model_url) except ImportError as e: raise ValueError( "Could not import some python packages." "Please install them." ) from e class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a TensorflowHub embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """
https://python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
84f22e0feb26-1
Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.embed(texts).numpy() return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.embed(text).numpy()[0] return embedding.tolist() By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
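A short usage sketch for the class above, following its docstring. It assumes tensorflow_hub and tensorflow_text are installed; the model URL is the default multilingual Universal Sentence Encoder shown above.

from langchain.embeddings import TensorflowHubEmbeddings

url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
embeddings = TensorflowHubEmbeddings(model_url=url)

query_vector = embeddings.embed_query("hello world")
doc_vectors = embeddings.embed_documents(["hello world", "bonjour le monde"])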
0326e829580b-0
Source code for langchain.embeddings.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.llms.sagemaker_endpoint import ContentHandlerBase [docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain.embeddings import SagemakerEndpointEmbeddings endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model.
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
0326e829580b-1
"""The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: ContentHandlerBase """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.llms.sagemaker_endpoint import ContentHandlerBase class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
0326e829580b-2
endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[float]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts)) _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {}
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
0326e829580b-3
_endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(texts, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return self.content_handler.transform_output(response["Body"]) [docs] def embed_documents( self, texts: List[str], chunk_size: int = 64 ) -> List[List[float]]: """Compute doc embeddings using a SageMaker Inference Endpoint. Args: texts: The list of texts to embed. chunk_size: The chunk size defines how many input texts will be grouped together as request. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ results = [] _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size for i in range(0, len(texts), _chunk_size): response = self._embedding_func(texts[i : i + _chunk_size]) results.append(response) return results [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text]) By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
0326e829580b-4
""" return self._embedding_func([text]) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
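Because every SageMaker endpoint speaks its own request/response format, the class above delegates serialization to a ContentHandlerBase subclass. The sketch below shows one plausible wiring; it assumes boto3 and AWS credentials are available, and the endpoint name, region, and the "text_inputs"/"embedding" JSON keys are assumptions about a particular deployment that must be adapted to whatever your model container actually accepts and returns.

import json
from typing import Dict, List

from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class EmbeddingContentHandler(ContentHandlerBase):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompt: List[str], model_kwargs: Dict) -> bytes:
        # For embeddings, `prompt` is the batch of texts passed in by _embedding_func.
        return json.dumps({"text_inputs": prompt, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> List[List[float]]:
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json["embedding"]  # assumed response key


embeddings = SagemakerEndpointEmbeddings(
    endpoint_name="my-embedding-endpoint",   # placeholder
    region_name="us-west-2",                 # placeholder
    content_handler=EmbeddingContentHandler(),
)
vectors = embeddings.embed_documents(["hello", "world"])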
61c12327fb85-0
.rst .pdf Chains Chains# Note Conceptual Guide Using an LLM in isolation is fine for some simple applications, but many more complex ones require chaining LLMs - either with each other or with other experts. LangChain provides a standard interface for Chains, as well as some common implementations of chains for ease of use. The following sections of documentation are provided: Getting Started: A getting started guide for chains, to get you up and running quickly. How-To Guides: A collection of how-to guides. These highlight how to use various types of chains. Reference: API reference documentation for all Chain classes. previous Redis Chat Message History next Getting Started By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/chains.html
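As a concrete reference point for the interface described above, here is a minimal sketch of the most common chain, LLMChain, which combines a prompt template with an LLM. It assumes an OpenAI API key is configured in the environment; the prompt text is illustrative.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

# The chain formats the prompt with the input and passes it to the LLM.
print(chain.run("colorful socks"))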
41654bb70720-0
.rst .pdf Agents Contents Go Deeper Agents# Note Conceptual Guide Some applications will require not just a predetermined chain of calls to LLMs/other tools, but potentially an unknown chain that depends on the user’s input. In these types of chains, there is an “agent” which has access to a suite of tools. Depending on the user input, the agent can then decide which, if any, of these tools to call. In this section of documentation, we start with a Getting Started notebook that covers how to use everything related to agents in an end-to-end manner. We then split the documentation into the following sections: Tools An overview of the various tools LangChain supports. Agents An overview of the different agent types. Toolkits An overview of toolkits, and examples of the different ones LangChain supports. Agent Executor An overview of the Agent Executor class and examples of how to use it. Go Deeper# Tools Agents Toolkits Agent Executors
https://python.langchain.com/en/latest/modules/agents.html
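To make the idea concrete, a minimal agent sketch is shown below: the agent is given a search tool and a calculator and decides at each step which, if either, to call. It assumes OpenAI and SerpAPI credentials are configured; the question is illustrative.

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)  # search + calculator

agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What was the high temperature in SF yesterday, raised to the 0.23 power?")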
82e63ea6351b-0
.rst .pdf Memory Memory# Note Conceptual Guide By default, Chains and Agents are stateless, meaning that they treat each incoming query independently (as are the underlying LLMs and chat models). In some applications (chatbots being a GREAT example) it is highly important to remember previous interactions, both at a short-term and at a long-term level. The concept of “Memory” exists to do exactly that. LangChain provides memory components in two forms. First, LangChain provides helper utilities for managing and manipulating previous chat messages. These are designed to be modular and useful regardless of how they are used. Secondly, LangChain provides easy ways to incorporate these utilities into chains. The following sections of documentation are provided: Getting Started: An overview of how to get started with different types of memory. How-To Guides: A collection of how-to guides. These highlight different types of memory, as well as how to use memory in chains. Memory Getting Started How-To Guides
https://python.langchain.com/en/latest/modules/memory.html
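A short sketch of the simplest memory setup: a ConversationChain with a ConversationBufferMemory, so each new turn sees the earlier ones. It assumes an OpenAI API key is configured; the inputs are illustrative.

from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationBufferMemory(),  # keeps the running chat history in the prompt
)

conversation.predict(input="Hi, my name is Harrison.")
conversation.predict(input="What is my name?")  # answered from the remembered first turn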
52f160f8f47d-0
.rst .pdf Indexes Contents Go Deeper Indexes# Note Conceptual Guide Indexes refer to ways to structure documents so that LLMs can best interact with them. This module contains utility functions for working with documents, different types of indexes, and then examples for using those indexes in chains. The most common way that indexes are used in chains is in a “retrieval” step. This step refers to taking a user’s query and returning the most relevant documents. We draw this distinction because (1) an index can be used for other things besides retrieval, and (2) retrieval can use other logic besides an index to find relevant documents. We therefore have a concept of a “Retriever” interface - this is the interface that most chains work with. Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving unstructured data (like text documents). For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case sections for links to relevant functionality. The primary index and retrieval types supported by LangChain are currently centered around vector databases, and therefore much of the functionality dives deep on those topics. For an overview of everything related to this, please see the below notebook for getting started: Getting Started We then provide a deep dive on the four main components. Document Loaders How to load documents from a variety of sources. Text Splitters An overview of the abstractions and implementations around splitting text. VectorStores An overview of VectorStores and the many integrations LangChain provides. Retrievers An overview of Retrievers and the implementations LangChain provides. Go Deeper# Document Loaders Text Splitters Vectorstores Retrievers
https://python.langchain.com/en/latest/modules/indexes.html
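The typical index-then-retrieve flow described above looks roughly like the sketch below: load documents, split them, embed them into a vector store, and expose the store through the Retriever interface. The file path is a placeholder and an OpenAI API key is assumed for the embeddings.

from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

documents = TextLoader("state_of_the_union.txt").load()  # placeholder file
docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)

db = Chroma.from_documents(docs, OpenAIEmbeddings())
retriever = db.as_retriever()  # the interface most chains consume

relevant = retriever.get_relevant_documents("What did the president say about the economy?")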
51d361bdc35f-0
.rst .pdf Models Contents Go Deeper Models# Note Conceptual Guide This section of the documentation deals with different types of models that are used in LangChain. On this page we will go over the model types at a high level, but we have individual pages for each model type. The pages contain more detailed “how-to” guides for working with that model, as well as a list of different model providers. LLMs Large Language Models (LLMs) are the first type of models we cover. These models take a text string as input, and return a text string as output. Chat Models Chat Models are the second type of models we cover. These models are usually backed by a language model, but their APIs are more structured. Specifically, these models take a list of Chat Messages as input, and return a Chat Message. Text Embedding Models The third type of models we cover are text embedding models. These models take text as input and return a list of floats. Go Deeper# LLMs Chat Models Text Embedding Models previous Quickstart Guide next LLMs Contents Go Deeper By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/models.html
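The three model types differ mainly in their input/output signatures, which the sketch below makes explicit. It assumes an OpenAI API key is configured; the specific providers are illustrative.

from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import HumanMessage

# LLM: text string in, text string out
llm = OpenAI()
joke_text = llm("Tell me a joke about ducks.")

# Chat model: list of chat messages in, chat message out
chat = ChatOpenAI()
joke_message = chat([HumanMessage(content="Tell me a joke about ducks.")])

# Text embedding model: text in, list of floats out
vector = OpenAIEmbeddings().embed_query("Tell me a joke about ducks.")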
ce9c50b0133a-0
.rst .pdf Prompts Contents Go Deeper Prompts# Note Conceptual Guide The new way of programming models is through prompts. A “prompt” refers to the input to the model. This input is rarely hard coded, but rather is often constructed from multiple components. A PromptTemplate is responsible for the construction of this input. LangChain provides several classes and functions to make constructing and working with prompts easy. This section of documentation is split into four sections: LLM Prompt Templates How to use PromptTemplates to prompt Language Models. Chat Prompt Templates How to use PromptTemplates to prompt Chat Models. Example Selectors Often times it is useful to include examples in prompts. These examples can be hardcoded, but it is often more powerful if they are dynamically selected. This section goes over example selection. Output Parsers Language models (and Chat Models) output text. But many times you may want to get more structured information than just text back. This is where output parsers come in. Output Parsers are responsible for (1) instructing the model how output should be formatted, (2) parsing output into the desired formatting (including retrying if necessary). Go Deeper# Prompt Templates Chat Prompt Template Example Selectors Output Parsers previous TensorflowHub next Prompt Templates Contents Go Deeper By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts.html
0f0dd40120dc-0
.rst .pdf Prompt Templates Prompt Templates# Note Conceptual Guide Language models take text as input - that text is commonly referred to as a prompt. Typically this is not simply a hardcoded string but rather a combination of a template, some examples, and user input. LangChain provides several classes and functions to make constructing and working with prompts easy. The following sections of documentation are provided: Getting Started: An overview of all the functionality LangChain provides for working with and constructing prompts. How-To Guides: A collection of how-to guides. These highlight how to accomplish various objectives with our prompt class. Reference: API reference documentation for all prompt classes. previous Prompts next Getting Started By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates.html
1d71a6898950-0
.rst .pdf Output Parsers Output Parsers# Note Conceptual Guide Language models output text. But many times you may want to get more structured information than just text back. This is where output parsers come in. Output parsers are classes that help structure language model responses. There are two main methods an output parser must implement: get_format_instructions() -> str: A method which returns a string containing instructions for how the output of a language model should be formatted. parse(str) -> Any: A method which takes in a string (assumed to be the response from a language model) and parses it into some structure. And then one optional one: parse_with_prompt(str) -> Any: A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. To start, we recommend familiarizing yourself with the Getting Started section Output Parsers After that, we provide deep dives on all the different types of output parsers. CommaSeparatedListOutputParser OutputFixingParser PydanticOutputParser RetryOutputParser Structured Output Parser
https://python.langchain.com/en/latest/modules/prompts/output_parsers.html
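To ground the two required methods, here is a minimal custom parser sketch. The import path for BaseOutputParser (langchain.schema) is an assumption; the comma-separated format is just an example of pairing format instructions with a matching parse step.

from typing import List
from langchain.schema import BaseOutputParser


class CommaSeparatedParser(BaseOutputParser):
    """Parse a comma-separated model response into a list of strings."""

    def get_format_instructions(self) -> str:
        return "Your response should be a list of comma separated values."

    def parse(self, text: str) -> List[str]:
        return [item.strip() for item in text.strip().split(",")]


parser = CommaSeparatedParser()
parser.parse("red, green, blue")  # -> ['red', 'green', 'blue']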
601199d56b87-0
.ipynb .pdf Chat Prompt Template Chat Prompt Template# Chat Models take a list of chat messages as input - this list is commonly referred to as a prompt. Typically this is not simply a hardcoded list of messages but rather a combination of a template, some examples, and user input. LangChain provides several classes and functions to make constructing and working with prompts easy. from langchain.prompts import ( ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.schema import ( AIMessage, HumanMessage, SystemMessage ) You can make use of templating by using a MessagePromptTemplate. You can build a ChatPromptTemplate from one or more MessagePromptTemplates. You can use ChatPromptTemplate’s format_prompt – this returns a PromptValue, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model. For convenience, there is a from_template method exposed on the template. If you were to use this template, this is what it would look like: template="You are a helpful assistant that translates {input_language} to {output_language}." system_message_prompt = SystemMessagePromptTemplate.from_template(template) human_template="{text}" human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) # get a chat completion from the formatted messages chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages() [SystemMessage(content='You are a helpful assistant that translates English to French.', additional_kwargs={}), HumanMessage(content='I love programming.', additional_kwargs={})]
https://python.langchain.com/en/latest/modules/prompts/chat_prompt_template.html
601199d56b87-1
HumanMessage(content='I love programming.', additional_kwargs={})] If you wanted to construct the MessagePromptTemplate more directly, you could create a PromptTemplate outside and then pass it in, eg: prompt=PromptTemplate( template="You are a helpful assistant that translates {input_language} to {output_language}.", input_variables=["input_language", "output_language"], ) system_message_prompt = SystemMessagePromptTemplate(prompt=prompt) previous Example Selector next Example Selectors By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/chat_prompt_template.html
05658f408b34-0
.rst .pdf Example Selectors Example Selectors# Note Conceptual Guide If you have a large number of examples, you may need to select which ones to include in the prompt. The ExampleSelector is the class responsible for doing so. The base interface is defined as below: class BaseExampleSelector(ABC): """Interface for selecting examples to include in prompts.""" @abstractmethod def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on the inputs.""" The only method it needs to expose is a select_examples method. This takes in the input variables and then returns a list of examples. It is up to each specific implementation as to how those examples are selected. See below for a list of the example selectors LangChain provides. How to create a custom example selector LengthBased ExampleSelector Maximal Marginal Relevance ExampleSelector NGram Overlap ExampleSelector Similarity ExampleSelector
https://python.langchain.com/en/latest/modules/prompts/example_selectors.html
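A toy implementation of the interface above can make this concrete: the selector below ignores the inputs and picks two examples at random. The import path for the base class is an assumption, and the real base class also declares an add_example method, so both methods are implemented here.

import random
from typing import Dict, List

from langchain.prompts.example_selector.base import BaseExampleSelector


class RandomExampleSelector(BaseExampleSelector):
    def __init__(self, examples: List[Dict[str, str]]):
        self.examples = examples

    def add_example(self, example: Dict[str, str]) -> None:
        self.examples.append(example)

    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        # A real selector would use `input_variables`; this one just samples.
        return random.sample(self.examples, 2)


selector = RandomExampleSelector(examples=[{"word": "happy"}, {"word": "tall"}, {"word": "sunny"}])
selector.select_examples({"word": "big"})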
3c82f533e90d-0
.rst .pdf How-To Guides How-To Guides# If you’re new to the library, you may want to start with the Quickstart. The user guide here shows more advanced workflows and how to use the library in different ways. How to create a custom prompt template How to create a prompt template that uses few shot examples How to work with partial Prompt Templates How to serialize prompts previous Getting Started next How to create a custom prompt template By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/how_to_guides.html
0592885aabb8-0
.md .pdf Getting Started Contents What is a prompt template? Create a prompt template Load a prompt template from LangChainHub Pass few shot examples to a prompt template Select examples for a prompt template Getting Started# In this tutorial, we will learn about: what a prompt template is, and why it is needed, how to create a prompt template, how to pass few shot examples to a prompt template, how to select examples for a prompt template. What is a prompt template?# A prompt template refers to a reproducible way to generate a prompt. It contains a text string (“the template”), that can take in a set of parameters from the end user and generate a prompt. The prompt template may contain: instructions to the language model, a set of few shot examples to help the language model generate a better response, a question to the language model. The following code snippet contains an example of a prompt template: from langchain import PromptTemplate template = """ I want you to act as a naming consultant for new companies. Here are some examples of good company names: - search engine, Google - social media, Facebook - video sharing, YouTube The name should be short, catchy and easy to remember. What is a good name for a company that makes {product}? """ prompt = PromptTemplate( input_variables=["product"], template=template, ) Create a prompt template# You can create simple hardcoded prompts using the PromptTemplate class. Prompt templates can take any number of input variables, and can be formatted to generate a prompt. from langchain import PromptTemplate # An example prompt with no input variables no_input_prompt = PromptTemplate(input_variables=[], template="Tell me a joke.") no_input_prompt.format() # -> "Tell me a joke."
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
0592885aabb8-1
no_input_prompt.format() # -> "Tell me a joke." # An example prompt with one input variable one_input_prompt = PromptTemplate(input_variables=["adjective"], template="Tell me a {adjective} joke.") one_input_prompt.format(adjective="funny") # -> "Tell me a funny joke." # An example prompt with multiple input variables multiple_input_prompt = PromptTemplate( input_variables=["adjective", "content"], template="Tell me a {adjective} joke about {content}." ) multiple_input_prompt.format(adjective="funny", content="chickens") # -> "Tell me a funny joke about chickens." You can create custom prompt templates that format the prompt in any way you want. For more information, see Custom Prompt Templates. Note Currently, the template should be formatted as a Python f-string. We also support Jinja2 templates (see Using Jinja templates). In the future, we will support more templating languages such as Mako. Load a prompt template from LangChainHub# LangChainHub contains a collection of prompts which can be loaded directly via LangChain. from langchain.prompts import load_prompt prompt = load_prompt("lc://prompts/conversation/prompt.json") prompt.format(history="", input="What is 1 + 1?") You can read more about LangChainHub and the prompts available with it here. Pass few shot examples to a prompt template# Few shot examples are a set of examples that can be used to help the language model generate a better response. To generate a prompt with few shot examples, you can use the FewShotPromptTemplate. This class takes in a PromptTemplate and a list of few shot examples. It then formats the prompt template with the few shot examples. In this example, we’ll create a prompt to generate word antonyms.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
0592885aabb8-2
In this example, we’ll create a prompt to generate word antonyms. from langchain import PromptTemplate, FewShotPromptTemplate # First, create the list of few shot examples. examples = [ {"word": "happy", "antonym": "sad"}, {"word": "tall", "antonym": "short"}, ] # Next, we specify the template to format the examples we have provided. # We use the `PromptTemplate` class for this. example_formatter_template = """ Word: {word} Antonym: {antonym}\n """ example_prompt = PromptTemplate( input_variables=["word", "antonym"], template=example_formatter_template, ) # Finally, we create the `FewShotPromptTemplate` object. few_shot_prompt = FewShotPromptTemplate( # These are the examples we want to insert into the prompt. examples=examples, # This is how we want to format the examples when we insert them into the prompt. example_prompt=example_prompt, # The prefix is some text that goes before the examples in the prompt. # Usually, this consists of instructions. prefix="Give the antonym of every input", # The suffix is some text that goes after the examples in the prompt. # Usually, this is where the user input will go suffix="Word: {input}\nAntonym:", # The input variables are the variables that the overall prompt expects. input_variables=["input"], # The example_separator is the string we will use to join the prefix, examples, and suffix together with. example_separator="\n\n", ) # We can now generate a prompt using the `format` method. print(few_shot_prompt.format(input="big"))
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
0592885aabb8-3
print(few_shot_prompt.format(input="big")) # -> Give the antonym of every input # -> # -> Word: happy # -> Antonym: sad # -> # -> Word: tall # -> Antonym: short # -> # -> Word: big # -> Antonym: Select examples for a prompt template# If you have a large number of examples, you can use the ExampleSelector to select a subset of examples that will be most informative for the Language Model. This will help you generate a prompt that is more likely to generate a good response. Below, we’ll use the LengthBasedExampleSelector, which selects examples based on the length of the input. This is useful when you are worried about constructing a prompt that will go over the length of the context window. For longer inputs, it will select fewer examples to include, while for shorter inputs it will select more. We’ll continue with the example from the previous section, but this time we’ll use the LengthBasedExampleSelector to select the examples. from langchain.prompts.example_selector import LengthBasedExampleSelector # These are a lot of examples of a pretend task of creating antonyms. examples = [ {"word": "happy", "antonym": "sad"}, {"word": "tall", "antonym": "short"}, {"word": "energetic", "antonym": "lethargic"}, {"word": "sunny", "antonym": "gloomy"}, {"word": "windy", "antonym": "calm"}, ] # We'll use the `LengthBasedExampleSelector` to select the examples. example_selector = LengthBasedExampleSelector( # These are the examples is has available to choose from. examples=examples,
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
0592885aabb8-4
# These are the examples it has available to choose from. examples, # This is the PromptTemplate being used to format the examples. example_prompt=example_prompt, # This is the maximum length that the formatted examples should be. # Length is measured by the get_text_length function below. max_length=25, ) # We can now use the `example_selector` to create a `FewShotPromptTemplate`. dynamic_prompt = FewShotPromptTemplate( # We provide an ExampleSelector instead of examples. example_selector=example_selector, example_prompt=example_prompt, prefix="Give the antonym of every input", suffix="Word: {input}\nAntonym:", input_variables=["input"], example_separator="\n\n", ) # We can now generate a prompt using the `format` method. print(dynamic_prompt.format(input="big")) # -> Give the antonym of every input # -> # -> Word: happy # -> Antonym: sad # -> # -> Word: tall # -> Antonym: short # -> # -> Word: energetic # -> Antonym: lethargic # -> # -> Word: sunny # -> Antonym: gloomy # -> # -> Word: windy # -> Antonym: calm # -> # -> Word: big # -> Antonym: In contrast, if we provide a very long input, the LengthBasedExampleSelector will select fewer examples to include in the prompt. long_string = "big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else" print(dynamic_prompt.format(input=long_string)) # -> Give the antonym of every input # -> Word: happy # -> Antonym: sad
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
0592885aabb8-5
# -> Word: happy # -> Antonym: sad # -> # -> Word: big and huge and massive and large and gigantic and tall and much much much much much bigger than everything else # -> Antonym: LangChain comes with a few example selectors that you can use. For more details on how to use them, see Example Selectors. You can create custom example selectors that select examples based on any criteria you want. For more details on how to do this, see Creating a custom example selector. previous Prompt Templates next How-To Guides Contents What is a prompt template? Create a prompt template Load a prompt template from LangChainHub Pass few shot examples to a prompt template Select examples for a prompt template By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html
fca02fc776e1-0
.ipynb .pdf How to work with partial Prompt Templates Contents Partial With Strings Partial With Functions How to work with partial Prompt Templates# A prompt template is a class with a .format method which takes in a key-value map and returns a string (a prompt) to pass to the language model. Like other methods, it can make sense to “partial” a prompt template - e.g. pass in a subset of the required values, so as to create a new prompt template which expects only the remaining subset of values. LangChain supports this in two ways: we allow for partially formatted prompts (1) with string values, (2) with functions that return string values. These two different ways support different use cases. In the documentation below we go over the motivations for both use cases as well as how to do it in LangChain. Partial With Strings# One common use case for wanting to partial a prompt template is if you get some of the variables before others. For example, suppose you have a prompt template that requires two variables, foo and bar. If you get the foo value early on in the chain, but the bar value later, it can be annoying to wait until you have both variables in the same place to pass them to the prompt template. Instead, you can partial the prompt template with the foo value, and then pass the partialed prompt template along and just use that. Below is an example of doing this: from langchain.prompts import PromptTemplate prompt = PromptTemplate(template="{foo}{bar}", input_variables=["foo", "bar"]) partial_prompt = prompt.partial(foo="foo"); print(partial_prompt.format(bar="baz")) foobaz You can also just initialize the prompt with the partialed variables. prompt = PromptTemplate(template="{foo}{bar}", input_variables=["bar"], partial_variables={"foo": "foo"}) print(prompt.format(bar="baz")) foobaz
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/partial.html
fca02fc776e1-1
print(prompt.format(bar="baz")) foobaz Partial With Functions# The other common use is to partial with a function. The use case for this is when you have a variable you know that you always want to fetch in a common way. A prime example of this is with date or time. Imagine you have a prompt which you always want to have the current date. You can’t hard code it in the prompt, and passing it along with the other input variables is a bit annoying. In this case, it’s very handy to be able to partial the prompt with a function that always returns the current date. from datetime import datetime def _get_datetime(): now = datetime.now() return now.strftime("%m/%d/%Y, %H:%M:%S") prompt = PromptTemplate( template="Tell me a {adjective} joke about the day {date}", input_variables=["adjective", "date"] ); partial_prompt = prompt.partial(date=_get_datetime) print(partial_prompt.format(adjective="funny")) Tell me a funny joke about the day 02/27/2023, 22:15:16 You can also just initialize the prompt with the partialed variables, which often makes more sense in this workflow. prompt = PromptTemplate( template="Tell me a {adjective} joke about the day {date}", input_variables=["adjective"], partial_variables={"date": _get_datetime} ); print(prompt.format(adjective="funny")) Tell me a funny joke about the day 02/27/2023, 22:15:16 previous How to create a prompt template that uses few shot examples next How to serialize prompts Contents Partial With Strings Partial With Functions By Harrison Chase
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/partial.html
924a3434a165-0
.ipynb .pdf How to create a custom prompt template Contents Why are custom prompt templates needed? Creating a Custom Prompt Template Use the custom prompt template How to create a custom prompt template# Let’s suppose we want the LLM to generate English language explanations of a function given its name. To achieve this task, we will create a custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function. Why are custom prompt templates needed?# LangChain provides a set of default prompt templates that can be used to generate prompts for a variety of tasks. However, there may be cases where the default prompt templates do not meet your needs. For example, you may want to create a prompt template with specific dynamic instructions for your language model. In such cases, you can create a custom prompt template. Take a look at the current set of default prompt templates here. Creating a Custom Prompt Template# There are essentially two distinct prompt templates available - string prompt templates and chat prompt templates. String prompt templates provide a simple prompt in string format, while chat prompt templates produce a more structured prompt to be used with a chat API. In this guide, we will create a custom prompt using a string prompt template. To create a custom string prompt template, there are two requirements: It has an input_variables attribute that exposes what input variables the prompt template expects. It exposes a format method that takes in keyword arguments corresponding to the expected input_variables and returns the formatted prompt. We will create a custom prompt template that takes in the function name as input and formats the prompt to provide the source code of the function. To achieve this, let’s first create a function that will return the source code of a function given its name. import inspect def get_source_code(function_name): # Get the source code of the function
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/custom_prompt_template.html
924a3434a165-1
import inspect def get_source_code(function_name): # Get the source code of the function return inspect.getsource(function_name) Next, we’ll create a custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function. from langchain.prompts import StringPromptTemplate from pydantic import BaseModel, validator class FunctionExplainerPromptTemplate(StringPromptTemplate, BaseModel): """ A custom prompt template that takes in the function name as input, and formats the prompt template to provide the source code of the function. """ @validator("input_variables") def validate_input_variables(cls, v): """ Validate that the input variables are correct. """ if len(v) != 1 or "function_name" not in v: raise ValueError("function_name must be the only input_variable.") return v def format(self, **kwargs) -> str: # Get the source code of the function source_code = get_source_code(kwargs["function_name"]) # Generate the prompt to be sent to the language model prompt = f""" Given the function name and source code, generate an English language explanation of the function. Function Name: {kwargs["function_name"].__name__} Source Code: {source_code} Explanation: """ return prompt def _prompt_type(self): return "function-explainer" Use the custom prompt template# Now that we have created a custom prompt template, we can use it to generate prompts for our task. fn_explainer = FunctionExplainerPromptTemplate(input_variables=["function_name"]) # Generate a prompt for the function "get_source_code" prompt = fn_explainer.format(function_name=get_source_code) print(prompt)
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/custom_prompt_template.html
924a3434a165-2
prompt = fn_explainer.format(function_name=get_source_code) print(prompt) Given the function name and source code, generate an English language explanation of the function. Function Name: get_source_code Source Code: def get_source_code(function_name): # Get the source code of the function return inspect.getsource(function_name) Explanation: previous How-To Guides next How to create a prompt template that uses few shot examples Contents Why are custom prompt templates needed? Creating a Custom Prompt Template Use the custom prompt template By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/custom_prompt_template.html
0065c6218ff1-0
.ipynb .pdf How to create a prompt template that uses few shot examples Contents Use Case Using an example set Create the example set Create a formatter for the few shot examples Feed examples and formatter to FewShotPromptTemplate Using an example selector Feed examples into ExampleSelector Feed example selector into FewShotPromptTemplate How to create a prompt template that uses few shot examples# In this tutorial, we’ll learn how to create a prompt template that uses few shot examples. We’ll use the FewShotPromptTemplate class to create a prompt template that uses few shot examples. This class either takes in a set of examples, or an ExampleSelector object. In this tutorial, we’ll go over both options. Use Case# In this tutorial, we’ll configure few shot examples for self-ask with search. Using an example set# Create the example set# To get started, create a list of few shot examples. Each example should be a dictionary with the keys being the input variables and the values being the values for those input variables. from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate examples = [ { "question": "Who lived longer, Muhammad Ali or Alan Turing?", "answer": """ Are follow up questions needed here: Yes. Follow up: How old was Muhammad Ali when he died? Intermediate answer: Muhammad Ali was 74 years old when he died. Follow up: How old was Alan Turing when he died? Intermediate answer: Alan Turing was 41 years old when he died. So the final answer is: Muhammad Ali """ }, { "question": "When was the founder of craigslist born?", "answer": """ Are follow up questions needed here: Yes.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
0065c6218ff1-1
"answer": """ Are follow up questions needed here: Yes. Follow up: Who was the founder of craigslist? Intermediate answer: Craigslist was founded by Craig Newmark. Follow up: When was Craig Newmark born? Intermediate answer: Craig Newmark was born on December 6, 1952. So the final answer is: December 6, 1952 """ }, { "question": "Who was the maternal grandfather of George Washington?", "answer": """ Are follow up questions needed here: Yes. Follow up: Who was the mother of George Washington? Intermediate answer: The mother of George Washington was Mary Ball Washington. Follow up: Who was the father of Mary Ball Washington? Intermediate answer: The father of Mary Ball Washington was Joseph Ball. So the final answer is: Joseph Ball """ }, { "question": "Are both the directors of Jaws and Casino Royale from the same country?", "answer": """ Are follow up questions needed here: Yes. Follow up: Who is the director of Jaws? Intermediate Answer: The director of Jaws is Steven Spielberg. Follow up: Where is Steven Spielberg from? Intermediate Answer: The United States. Follow up: Who is the director of Casino Royale? Intermediate Answer: The director of Casino Royale is Martin Campbell. Follow up: Where is Martin Campbell from? Intermediate Answer: New Zealand. So the final answer is: No """ } ] Create a formatter for the few shot examples# Configure a formatter that will format the few shot examples into a string. This formatter should be a PromptTemplate object. example_prompt = PromptTemplate(input_variables=["question", "answer"], template="Question: {question}\n{answer}")
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
0065c6218ff1-2
print(example_prompt.format(**examples[0])) Question: Who lived longer, Muhammad Ali or Alan Turing? Are follow up questions needed here: Yes. Follow up: How old was Muhammad Ali when he died? Intermediate answer: Muhammad Ali was 74 years old when he died. Follow up: How old was Alan Turing when he died? Intermediate answer: Alan Turing was 41 years old when he died. So the final answer is: Muhammad Ali Feed examples and formatter to FewShotPromptTemplate# Finally, create a FewShotPromptTemplate object. This object takes in the few shot examples and the formatter for the few shot examples. prompt = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, suffix="Question: {input}", input_variables=["input"] ) print(prompt.format(input="Who was the father of Mary Ball Washington?")) Question: Who lived longer, Muhammad Ali or Alan Turing? Are follow up questions needed here: Yes. Follow up: How old was Muhammad Ali when he died? Intermediate answer: Muhammad Ali was 74 years old when he died. Follow up: How old was Alan Turing when he died? Intermediate answer: Alan Turing was 41 years old when he died. So the final answer is: Muhammad Ali Question: When was the founder of craigslist born? Are follow up questions needed here: Yes. Follow up: Who was the founder of craigslist? Intermediate answer: Craigslist was founded by Craig Newmark. Follow up: When was Craig Newmark born? Intermediate answer: Craig Newmark was born on December 6, 1952. So the final answer is: December 6, 1952 Question: Who was the maternal grandfather of George Washington? Are follow up questions needed here: Yes.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
0065c6218ff1-3
Are follow up questions needed here: Yes. Follow up: Who was the mother of George Washington? Intermediate answer: The mother of George Washington was Mary Ball Washington. Follow up: Who was the father of Mary Ball Washington? Intermediate answer: The father of Mary Ball Washington was Joseph Ball. So the final answer is: Joseph Ball Question: Are both the directors of Jaws and Casino Royale from the same country? Are follow up questions needed here: Yes. Follow up: Who is the director of Jaws? Intermediate Answer: The director of Jaws is Steven Spielberg. Follow up: Where is Steven Spielberg from? Intermediate Answer: The United States. Follow up: Who is the director of Casino Royale? Intermediate Answer: The director of Casino Royale is Martin Campbell. Follow up: Where is Martin Campbell from? Intermediate Answer: New Zealand. So the final answer is: No Question: Who was the father of Mary Ball Washington? Using an example selector# Feed examples into ExampleSelector# We will reuse the example set and the formatter from the previous section. However, instead of feeding the examples directly into the FewShotPromptTemplate object, we will feed them into an ExampleSelector object. In this tutorial, we will use the SemanticSimilarityExampleSelector class. This class selects few shot examples based on their similarity to the input. It uses an embedding model to compute the similarity between the input and the few shot examples, as well as a vector store to perform the nearest neighbor search. from langchain.prompts.example_selector import SemanticSimilarityExampleSelector from langchain.vectorstores import Chroma from langchain.embeddings import OpenAIEmbeddings example_selector = SemanticSimilarityExampleSelector.from_examples( # This is the list of examples available to select from. examples,
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
0065c6218ff1-4
# This is the list of examples available to select from. examples, # This is the embedding class used to produce embeddings which are used to measure semantic similarity. OpenAIEmbeddings(), # This is the VectorStore class that is used to store the embeddings and do a similarity search over. Chroma, # This is the number of examples to produce. k=1 ) # Select the most similar example to the input. question = "Who was the father of Mary Ball Washington?" selected_examples = example_selector.select_examples({"question": question}) print(f"Examples most similar to the input: {question}") for example in selected_examples: print("\n") for k, v in example.items(): print(f"{k}: {v}") Running Chroma using direct local API. Using DuckDB in-memory for database. Data will be transient. Examples most similar to the input: Who was the father of Mary Ball Washington? question: Who was the maternal grandfather of George Washington? answer: Are follow up questions needed here: Yes. Follow up: Who was the mother of George Washington? Intermediate answer: The mother of George Washington was Mary Ball Washington. Follow up: Who was the father of Mary Ball Washington? Intermediate answer: The father of Mary Ball Washington was Joseph Ball. So the final answer is: Joseph Ball Feed example selector into FewShotPromptTemplate# Finally, create a FewShotPromptTemplate object. This object takes in the example selector and the formatter for the few shot examples. prompt = FewShotPromptTemplate( example_selector=example_selector, example_prompt=example_prompt, suffix="Question: {input}", input_variables=["input"] )
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
0065c6218ff1-5
suffix="Question: {input}", input_variables=["input"] ) print(prompt.format(input="Who was the father of Mary Ball Washington?")) Question: Who was the maternal grandfather of George Washington? Are follow up questions needed here: Yes. Follow up: Who was the mother of George Washington? Intermediate answer: The mother of George Washington was Mary Ball Washington. Follow up: Who was the father of Mary Ball Washington? Intermediate answer: The father of Mary Ball Washington was Joseph Ball. So the final answer is: Joseph Ball Question: Who was the father of Mary Ball Washington? previous How to create a custom prompt template next How to work with partial Prompt Templates Contents Use Case Using an example set Create the example set Create a formatter for the few shot examples Feed examples and formatter to FewShotPromptTemplate Using an example selector Feed examples into ExampleSelector Feed example selector into FewShotPromptTemplate By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Apr 21, 2023.
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/few_shot_examples.html
ea6302b58a6b-0
.ipynb .pdf How to serialize prompts Contents PromptTemplate Loading from YAML Loading from JSON Loading Template from a File FewShotPromptTemplate Examples Loading from YAML Loading from JSON Examples in the Config Example Prompt from a File How to serialize prompts# It is often preferable to store prompts not as python code but as files. This can make it easy to share, store, and version prompts. This notebook covers how to do that in LangChain, walking through all the different types of prompts and the different serialization options. At a high level, the following design principles are applied to serialization: Both JSON and YAML are supported. We want to support serialization methods that are human readable on disk, and YAML and JSON are two of the most popular methods for that. Note that this rule applies to prompts. For other assets, like Examples, different serialization methods may be supported. We support specifying everything in one file, or storing different components (templates, examples, etc) in different files and referencing them. For some cases, storing everything in one file makes the most sense, but for others it is preferable to split up some of the assets (long templates, large examples, reusable components). LangChain supports both. There is also a single entry point to load prompts from disk, making it easy to load any type of prompt. # All prompts are loaded through the `load_prompt` function. from langchain.prompts import load_prompt PromptTemplate# This section covers examples for loading a PromptTemplate. Loading from YAML# This shows an example of loading a PromptTemplate from YAML. !cat simple_prompt.yaml _type: prompt input_variables: ["adjective", "content"] template: Tell me a {adjective} joke about {content}.
prompt = load_prompt("simple_prompt.yaml") print(prompt.format(adjective="funny", content="chickens")) Tell me a funny joke about chickens. Loading from JSON# This shows an example of loading a PromptTemplate from JSON. !cat simple_prompt.json { "_type": "prompt", "input_variables": ["adjective", "content"], "template": "Tell me a {adjective} joke about {content}." } prompt = load_prompt("simple_prompt.json") print(prompt.format(adjective="funny", content="chickens")) Tell me a funny joke about chickens. Loading Template from a File# This shows an example of storing the template in a separate file and then referencing it in the config. Notice that the key changes from template to template_path. !cat simple_template.txt Tell me a {adjective} joke about {content}. !cat simple_prompt_with_template_file.json { "_type": "prompt", "input_variables": ["adjective", "content"], "template_path": "simple_template.txt" } prompt = load_prompt("simple_prompt_with_template_file.json") print(prompt.format(adjective="funny", content="chickens")) Tell me a funny joke about chickens. FewShotPromptTemplate# This section covers examples for loading few shot prompt templates. Examples# This shows an example of what examples stored as json might look like. !cat examples.json [ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"} ] And here is what the same examples stored as yaml might look like. !cat examples.yaml - input: happy output: sad - input: tall output: short Loading from YAML#
output: sad - input: tall output: short Loading from YAML# This shows an example of loading a few shot example from YAML. !cat few_shot_prompt.yaml _type: few_shot input_variables: ["adjective"] prefix: Write antonyms for the following words. example_prompt: _type: prompt input_variables: ["input", "output"] template: "Input: {input}\nOutput: {output}" examples: examples.json suffix: "Input: {adjective}\nOutput:" prompt = load_prompt("few_shot_prompt.yaml") print(prompt.format(adjective="funny")) Write antonyms for the following words. Input: happy Output: sad Input: tall Output: short Input: funny Output: The same would work if you loaded examples from the yaml file. !cat few_shot_prompt_yaml_examples.yaml _type: few_shot input_variables: ["adjective"] prefix: Write antonyms for the following words. example_prompt: _type: prompt input_variables: ["input", "output"] template: "Input: {input}\nOutput: {output}" examples: examples.yaml suffix: "Input: {adjective}\nOutput:" prompt = load_prompt("few_shot_prompt_yaml_examples.yaml") print(prompt.format(adjective="funny")) Write antonyms for the following words. Input: happy Output: sad Input: tall Output: short Input: funny Output: Loading from JSON# This shows an example of loading a few shot example from JSON.
!cat few_shot_prompt.json { "_type": "few_shot", "input_variables": ["adjective"], "prefix": "Write antonyms for the following words.", "example_prompt": { "_type": "prompt", "input_variables": ["input", "output"], "template": "Input: {input}\nOutput: {output}" }, "examples": "examples.json", "suffix": "Input: {adjective}\nOutput:" } prompt = load_prompt("few_shot_prompt.json") print(prompt.format(adjective="funny")) Write antonyms for the following words. Input: happy Output: sad Input: tall Output: short Input: funny Output: Examples in the Config# This shows an example of referencing the examples directly in the config. !cat few_shot_prompt_examples_in.json { "_type": "few_shot", "input_variables": ["adjective"], "prefix": "Write antonyms for the following words.", "example_prompt": { "_type": "prompt", "input_variables": ["input", "output"], "template": "Input: {input}\nOutput: {output}" }, "examples": [ {"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"} ], "suffix": "Input: {adjective}\nOutput:" } prompt = load_prompt("few_shot_prompt_examples_in.json") print(prompt.format(adjective="funny")) Write antonyms for the following words. Input: happy Output: sad Input: tall Output: short Input: funny Output: Example Prompt from a File#
This shows an example of loading the PromptTemplate that is used to format the examples from a separate file. Note that the key changes from example_prompt to example_prompt_path. !cat example_prompt.json { "_type": "prompt", "input_variables": ["input", "output"], "template": "Input: {input}\nOutput: {output}" } !cat few_shot_prompt_example_prompt.json { "_type": "few_shot", "input_variables": ["adjective"], "prefix": "Write antonyms for the following words.", "example_prompt_path": "example_prompt.json", "examples": "examples.json", "suffix": "Input: {adjective}\nOutput:" } prompt = load_prompt("few_shot_prompt_example_prompt.json") print(prompt.format(adjective="funny")) Write antonyms for the following words. Input: happy Output: sad Input: tall Output: short Input: funny Output:
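Serialization also works in the other direction. The sketch below is not from the original notebook: it assumes the save method on prompt templates, which appears to pick JSON or YAML based on the file extension, and the file name is just a placeholder.

from langchain.prompts import PromptTemplate, load_prompt

# Hypothetical round trip: build a prompt in code, save it, then reload it.
prompt = PromptTemplate(
    input_variables=["adjective", "content"],
    template="Tell me a {adjective} joke about {content}.",
)
prompt.save("my_prompt.yaml")  # assumption: the extension selects the YAML serializer
reloaded = load_prompt("my_prompt.yaml")
print(reloaded.format(adjective="funny", content="chickens"))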
https://python.langchain.com/en/latest/modules/prompts/prompt_templates/examples/prompt_serialization.html
Output Parsers# Language models output text. But often you may want to get more structured information back than just text. This is where output parsers come in. Output parsers are classes that help structure language model responses. There are two main methods an output parser must implement: get_format_instructions() -> str: A method which returns a string containing instructions for how the output of a language model should be formatted. parse(str) -> Any: A method which takes in a string (assumed to be the response from a language model) and parses it into some structure. And then one optional one: parse_with_prompt(str, PromptValue) -> Any: A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Below we go over the main type of output parser, the PydanticOutputParser. See the examples folder for other options. from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.output_parsers import PydanticOutputParser from pydantic import BaseModel, Field, validator from typing import List model_name = 'text-davinci-003' temperature = 0.0 model = OpenAI(model_name=model_name, temperature=temperature) # Define your desired data structure. class Joke(BaseModel): setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke")
# You can add custom validation logic easily with Pydantic. @validator('setup') def question_ends_with_question_mark(cls, field): if field[-1] != '?': raise ValueError("Badly formed question!") return field # Set up a parser + inject instructions into the prompt template. parser = PydanticOutputParser(pydantic_object=Joke) prompt = PromptTemplate( template="Answer the user query.\n{format_instructions}\n{query}\n", input_variables=["query"], partial_variables={"format_instructions": parser.get_format_instructions()} ) # And a query intended to prompt a language model to populate the data structure. joke_query = "Tell me a joke." _input = prompt.format_prompt(query=joke_query) output = model(_input.to_string()) parser.parse(output) Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')
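To make the two required methods described at the top of this page concrete, here is a toy parser. It is a sketch written for this note, not a class shipped by LangChain, and it implements get_format_instructions and parse for a comma separated list.

from typing import List
from langchain.schema import BaseOutputParser

class SimpleListOutputParser(BaseOutputParser):
    """Toy parser: turn 'foo, bar, baz' into ['foo', 'bar', 'baz']."""

    def get_format_instructions(self) -> str:
        return "Your response should be a list of comma separated values, e.g. `foo, bar, baz`"

    def parse(self, text: str) -> List[str]:
        # Split on commas and strip surrounding whitespace from each item.
        return [item.strip() for item in text.strip().split(",")]

parser = SimpleListOutputParser()
parser.parse("red, green, blue")  # -> ['red', 'green', 'blue']

LangChain ships a similar CommaSeparatedListOutputParser; the point here is only to show the shape of the interface.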
https://python.langchain.com/en/latest/modules/prompts/output_parsers/getting_started.html
OutputFixingParser# This output parser wraps another output parser and tries to fix any mistakes. The Pydantic guardrail simply tries to parse the LLM response. If it does not parse correctly, then it errors. But we can do other things besides throw errors. Specifically, we can pass the misformatted output, along with the format instructions, to the model and ask it to fix it. For this example, we'll use the PydanticOutputParser from above. Here's what happens if we pass it a result that does not comply with the schema: from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.output_parsers import PydanticOutputParser from pydantic import BaseModel, Field, validator from typing import List class Actor(BaseModel): name: str = Field(description="name of an actor") film_names: List[str] = Field(description="list of names of films they starred in") actor_query = "Generate the filmography for a random actor." parser = PydanticOutputParser(pydantic_object=Actor) misformatted = "{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}" parser.parse(misformatted) --------------------------------------------------------------------------- JSONDecodeError Traceback (most recent call last) File ~/workplace/langchain/langchain/output_parsers/pydantic.py:23, in PydanticOutputParser.parse(self, text) 22 json_str = match.group() ---> 23 json_object = json.loads(json_str)
24 return self.pydantic_object.parse_obj(json_object) File ~/.pyenv/versions/3.9.1/lib/python3.9/json/__init__.py:346, in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw) 343 if (cls is None and object_hook is None and 344 parse_int is None and parse_float is None and 345 parse_constant is None and object_pairs_hook is None and not kw): --> 346 return _default_decoder.decode(s) 347 if cls is None: File ~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:337, in JSONDecoder.decode(self, s, _w) 333 """Return the Python representation of ``s`` (a ``str`` instance 334 containing a JSON document). 335 336 """ --> 337 obj, end = self.raw_decode(s, idx=_w(s, 0).end()) 338 end = _w(s, end).end() File ~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:353, in JSONDecoder.raw_decode(self, s, idx) 352 try: --> 353 obj, end = self.scan_once(s, idx) 354 except StopIteration as err: JSONDecodeError: Expecting property name enclosed in double quotes: line 1 column 2 (char 1) During handling of the above exception, another exception occurred: OutputParserException Traceback (most recent call last) Cell In[6], line 1 ----> 1 parser.parse(misformatted)
File ~/workplace/langchain/langchain/output_parsers/pydantic.py:29, in PydanticOutputParser.parse(self, text) 27 name = self.pydantic_object.__name__ 28 msg = f"Failed to parse {name} from completion {text}. Got: {e}" ---> 29 raise OutputParserException(msg) OutputParserException: Failed to parse Actor from completion {'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}. Got: Expecting property name enclosed in double quotes: line 1 column 2 (char 1) Now we can construct and use an OutputFixingParser. This output parser takes as an argument another output parser but also an LLM with which to try to correct any formatting mistakes. from langchain.output_parsers import OutputFixingParser new_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI()) new_parser.parse(misformatted) Actor(name='Tom Hanks', film_names=['Forrest Gump'])
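Under the hood the idea is simply: try to parse, and on failure send the bad output plus the format instructions back to a model. The snippet below is a simplified sketch of that loop, not the actual OutputFixingParser implementation; the wording of fix_prompt is invented, and it assumes a completion-style LLM such as OpenAI that can be called directly on a string.

from langchain.schema import OutputParserException

def parse_with_fix(parser, llm, text):
    # Try the normal parse first; on failure, ask the LLM to repair the output.
    try:
        return parser.parse(text)
    except OutputParserException:
        fix_prompt = (
            "The following output did not follow the format instructions.\n"
            f"Format instructions:\n{parser.get_format_instructions()}\n"
            f"Output:\n{text}\n"
            "Rewrite the output so that it follows the instructions exactly:"
        )
        return parser.parse(llm(fix_prompt))

parse_with_fix(parser, OpenAI(temperature=0), misformatted)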
https://python.langchain.com/en/latest/modules/prompts/output_parsers/examples/output_fixing_parser.html
RetryOutputParser# While in some cases it is possible to fix parsing mistakes by looking only at the output, in other cases it isn't. An example of this is when the output is not just in the incorrect format, but is only partially complete. Consider the example below. from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.output_parsers import PydanticOutputParser, OutputFixingParser, RetryOutputParser from pydantic import BaseModel, Field, validator from typing import List template = """Based on the user question, provide an Action and Action Input for what step should be taken. {format_instructions} Question: {query} Response:""" class Action(BaseModel): action: str = Field(description="action to take") action_input: str = Field(description="input to the action") parser = PydanticOutputParser(pydantic_object=Action) prompt = PromptTemplate( template="Answer the user query.\n{format_instructions}\n{query}\n", input_variables=["query"], partial_variables={"format_instructions": parser.get_format_instructions()} ) prompt_value = prompt.format_prompt(query="who is leo di caprios gf?") bad_response = '{"action": "search"}' If we try to parse this response as is, we will get an error: parser.parse(bad_response) --------------------------------------------------------------------------- ValidationError Traceback (most recent call last) File ~/workplace/langchain/langchain/output_parsers/pydantic.py:24, in PydanticOutputParser.parse(self, text)
23 json_object = json.loads(json_str) ---> 24 return self.pydantic_object.parse_obj(json_object) 26 except (json.JSONDecodeError, ValidationError) as e: File ~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:527, in pydantic.main.BaseModel.parse_obj() File ~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:342, in pydantic.main.BaseModel.__init__() ValidationError: 1 validation error for Action action_input field required (type=value_error.missing) During handling of the above exception, another exception occurred: OutputParserException Traceback (most recent call last) Cell In[6], line 1 ----> 1 parser.parse(bad_response) File ~/workplace/langchain/langchain/output_parsers/pydantic.py:29, in PydanticOutputParser.parse(self, text) 27 name = self.pydantic_object.__name__ 28 msg = f"Failed to parse {name} from completion {text}. Got: {e}" ---> 29 raise OutputParserException(msg) OutputParserException: Failed to parse Action from completion {"action": "search"}. Got: 1 validation error for Action action_input field required (type=value_error.missing) If we try to use the OutputFixingParser to fix this error, it will be confused - namely, it doesn’t know what to actually put for action input. fix_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI()) fix_parser.parse(bad_response) Action(action='search', action_input='')
Instead, we can use the RetryOutputParser, which passes in the prompt (as well as the original output) to try again to get a better response. Here we use the RetryWithErrorOutputParser variant, which also passes along the error message from the failed parse. from langchain.output_parsers import RetryWithErrorOutputParser retry_parser = RetryWithErrorOutputParser.from_llm(parser=parser, llm=OpenAI(temperature=0)) retry_parser.parse_with_prompt(bad_response, prompt_value) Action(action='search', action_input='who is leo di caprios gf?')
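For comparison, the plain RetryOutputParser (already imported above) appears to expose the same interface, just without forwarding the error text; the lines below are a sketch based on that assumption rather than output from an actual run.

# Assumed to mirror RetryWithErrorOutputParser, minus the error message.
plain_retry_parser = RetryOutputParser.from_llm(parser=parser, llm=OpenAI(temperature=0))
plain_retry_parser.parse_with_prompt(bad_response, prompt_value)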
https://python.langchain.com/en/latest/modules/prompts/output_parsers/examples/retry.html
PydanticOutputParser# This output parser allows users to specify an arbitrary JSON schema and query LLMs for JSON outputs that conform to that schema. Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON. In the OpenAI family, DaVinci can do this reliably, but Curie's ability already drops off dramatically. Use Pydantic to declare your data model. Pydantic's BaseModel is like a Python dataclass, but with actual type checking + coercion. from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.output_parsers import PydanticOutputParser from pydantic import BaseModel, Field, validator from typing import List model_name = 'text-davinci-003' temperature = 0.0 model = OpenAI(model_name=model_name, temperature=temperature) # Define your desired data structure. class Joke(BaseModel): setup: str = Field(description="question to set up a joke") punchline: str = Field(description="answer to resolve the joke") # You can add custom validation logic easily with Pydantic. @validator('setup') def question_ends_with_question_mark(cls, field): if field[-1] != '?': raise ValueError("Badly formed question!") return field # And a query intended to prompt a language model to populate the data structure. joke_query = "Tell me a joke." # Set up a parser + inject instructions into the prompt template. parser = PydanticOutputParser(pydantic_object=Joke) prompt = PromptTemplate(
https://python.langchain.com/en/latest/modules/prompts/output_parsers/examples/pydantic.html