pydantic model langchain.memory.ConversationKGMemory[source]#
Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation.
field entity_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template='You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.\n\nThe conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.\n\nReturn the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: how\'s it going today?\nAI: "It\'s going great! How about you?"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"\nLast line:\nPerson #1: i\'m trying to improve Langchain\'s interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I\'m working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:', template_format='f-string', validate_template=True)#
field human_prefix: str = 'Human'#
field k: int = 2#
Number of previous utterances to include in the context.
field kg: langchain.graphs.networkx_graph.NetworkxEntityGraph [Optional]#
field knowledge_extraction_prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template="You are a networked intelligence helping a human track knowledge triples about all relevant people, things, concepts, etc. and integrating them with your knowledge stored within your weights as well as that stored in a knowledge graph. Extract all of the knowledge triples from the last line of conversation. A knowledge triple is a clause that contains a subject, a predicate, and an object. The subject is the entity being described, the predicate is the property of the subject that is being described, and the object is the value of the property.\n\nEXAMPLE\nConversation history:\nPerson #1: Did you hear aliens landed in Area 51?\nAI: No, I didn't hear that. What do you know about Area 51?\nPerson #1: It's a secret military base in Nevada.\nAI: What do you know about Nevada?\nLast line of conversation:\nPerson #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\nOutput: (Nevada, is a, state)<|>(Nevada, is in, US)<|>(Nevada, is the number 1 producer of, gold)\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: Hello.\nAI: Hi! How are you?\nPerson #1: I'm good. How are you?\nAI: I'm good too.\nLast line of conversation:\nPerson #1: I'm going to the store.\n\nOutput: NONE\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson #1: What do you know about Descartes?\nAI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\nPerson #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\nAI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\nLast line of conversation:\nPerson #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\nOutput: (Descartes, likes to drive, antique scooters)<|>(Descartes, plays, mandolin)\nEND OF EXAMPLE\n\nConversation history (for reference only):\n{history}\nLast line of conversation (for extraction):\nHuman: {input}\n\nOutput:", template_format='f-string', validate_template=True)#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field summary_message_cls: Type[langchain.schema.BaseMessage] = <class 'langchain.schema.SystemMessage'>#
clear() → None[source]#
Clear memory contents.
get_current_entities(input_string: str) → List[str][source]#
get_knowledge_triplets(input_string: str) → List[langchain.graphs.networkx_graph.KnowledgeTriple][source]#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
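A minimal usage sketch built only from the fields and methods above (assumes an OpenAI API key is configured; any BaseLanguageModel can fill the llm field):
from langchain.llms import OpenAI
from langchain.memory import ConversationKGMemory
memory = ConversationKGMemory(llm=OpenAI(temperature=0))
# each saved turn is mined for knowledge triples and stored in the graph
memory.save_context({"input": "Sam is my friend."}, {"output": "Who is Sam?"})
memory.get_current_entities("What do you know about Sam?")
memory.load_memory_variables({"input": "What do you know about Sam?"})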
pydantic model langchain.memory.ConversationStringBufferMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
Prefix to use for AI generated responses.
field buffer: str = ''#
field human_prefix: str = 'Human'#
field input_key: Optional[str] = None#
field output_key: Optional[str] = None#
clear() → None[source]#
Clear memory contents.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property memory_variables: List[str]#
Will always return list of memory variables.
pydantic model langchain.memory.ConversationSummaryBufferMemory[source]#
Buffer with summarizer for storing conversation memory.
field max_token_limit: int = 2000#
field memory_key: str = 'history'#
field moving_summary_buffer: str = ''#
clear() → None[source]#
Clear memory contents.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
prune() → None[source]#
Prune buffer if it exceeds max token limit.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property buffer: List[langchain.schema.BaseMessage]#
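A usage sketch (the llm field, inherited from the summarizer mixin and not listed above, is required; max_token_limit is deliberately small here to force summarization):
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryBufferMemory
memory = ConversationSummaryBufferMemory(llm=OpenAI(temperature=0), max_token_limit=40)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much, you?"}, {"output": "not much"})
# turns beyond max_token_limit are pruned into moving_summary_buffer
memory.load_memory_variables({})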
pydantic model langchain.memory.ConversationSummaryMemory[source]#
Conversation summarizer to memory.
field buffer: str = ''#
clear() → None[source]#
Clear memory contents.
classmethod from_messages(llm: langchain.base_language.BaseLanguageModel, chat_memory: langchain.schema.BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any) → langchain.memory.summary.ConversationSummaryMemory[source]#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
pydantic model langchain.memory.ConversationTokenBufferMemory[source]#
Buffer for storing conversation memory.
field ai_prefix: str = 'AI'#
field human_prefix: str = 'Human'#
field llm: langchain.base_language.BaseLanguageModel [Required]#
field max_token_limit: int = 2000#
field memory_key: str = 'history'#
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer. Pruned.
property buffer: List[langchain.schema.BaseMessage]#
String buffer of memory.
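A usage sketch (the llm field is required because token counts come from the model's tokenizer):
from langchain.llms import OpenAI
from langchain.memory import ConversationTokenBufferMemory
memory = ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.load_memory_variables({})  # e.g. {'history': 'Human: hi\nAI: whats up'}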
class langchain.memory.CosmosDBChatMessageHistory(cosmos_endpoint: str, cosmos_database: str, cosmos_container: str, session_id: str, user_id: str, credential: Any = None, connection_string: Optional[str] = None, ttl: Optional[int] = None, cosmos_client_kwargs: Optional[dict] = None)[source]#
Chat history backed by Azure CosmosDB.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Add a self-created message to the store
clear() → None[source]#
Clear session memory from this memory and cosmos.
load_messages() → None[source]#
Retrieve the messages from Cosmos
prepare_cosmos() → None[source]#
Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
upsert_messages() → None[source]#
Update the cosmosdb item.
class langchain.memory.DynamoDBChatMessageHistory(table_name: str, session_id: str, endpoint_url: Optional[str] = None)[source]#
Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table with name table_name
and a partition Key of SessionId is present.
Parameters
table_name – name of the DynamoDB table
session_id – arbitrary key that is used to store the messages
of a single chat session.
endpoint_url – URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don't have to
worry about setting the endpoint_url.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in DynamoDB
clear() → None[source]#
Clear session memory from DynamoDB
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from DynamoDB
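A usage sketch (requires boto3; the table name and session id below are placeholders, and the table must already exist with a SessionId partition key):
from langchain.memory import DynamoDBChatMessageHistory
from langchain.schema import AIMessage, HumanMessage
history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="session-1")
history.add_message(HumanMessage(content="hi!"))
history.add_message(AIMessage(content="hello, how can I help?"))
history.messages  # read back from DynamoDB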
class langchain.memory.FileChatMessageHistory(file_path: str)[source]#
Chat message history that stores history in a local file.
Parameters
file_path – path of the local file to store the messages.
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in the local file
clear() → None[source]#
Clear session memory from the local file
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from the local file
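A usage sketch (the file path is an arbitrary example):
from langchain.memory import FileChatMessageHistory
from langchain.schema import HumanMessage
history = FileChatMessageHistory(file_path="chat_history.json")
history.add_message(HumanMessage(content="remember this"))
history.messages  # read back from the local file
history.clear()   # wipe the file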
pydantic model langchain.memory.InMemoryEntityStore[source]#
Basic in-memory entity store.
field store: Dict[str, Optional[str]] = {}#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
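For example:
from langchain.memory import InMemoryEntityStore
store = InMemoryEntityStore()
store.set("Deven", "Deven is working on a hackathon project.")
store.exists("Deven")                # True
store.get("Sam", default="Unknown")  # falls back to the default
store.delete("Deven")
store.clear()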
class langchain.memory.MomentoChatMessageHistory(session_id: str, cache_client: momento.CacheClient, cache_name: str, *, key_prefix: str = 'message_store:', ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]#
Chat message history cache that uses Momento as a backend.
See https://gomomento.com/
add_message(message: langchain.schema.BaseMessage) → None[source]#
Store a message in the cache.
Parameters
message (BaseMessage) – The message object to store.
Raises
SdkException – Momento service or network error.
Exception – Unexpected response.
clear() → None[source]#
Remove the session's messages from the cache.
Raises
SdkException – Momento service or network error.
Exception – Unexpected response.
classmethod from_client_params(session_id: str, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) → MomentoChatMessageHistory[source]#
Construct cache from CacheClient parameters.
property messages: list[langchain.schema.BaseMessage]#
Retrieve the messages from Momento.
Raises
SdkException – Momento service or network error
Exception – Unexpected response
Returns
List of cached messages
Return type
list[BaseMessage]
class langchain.memory.MongoDBChatMessageHistory(connection_string: str, session_id: str, database_name: str = 'chat_history', collection_name: str = 'message_store')[source]#
Chat message history that stores history in MongoDB.
Parameters
connection_string – connection string to connect to MongoDB
session_id – arbitrary key that is used to store the messages
of a single chat session.
database_name – name of the database to use
collection_name – name of the collection to use
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in MongoDB
clear() → None[source]#
Clear session memory from MongoDB
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from MongoDB
class langchain.memory.PostgresChatMessageHistory(session_id: str, connection_string: str = 'postgresql://postgres:mypassword@localhost/chat_history', table_name: str = 'message_store')[source]#
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in PostgreSQL
clear() → None[source]#
Clear session memory from PostgreSQL
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from PostgreSQL
pydantic model langchain.memory.ReadOnlySharedMemory[source]#
A memory wrapper that is read-only and cannot be changed.
field memory: langchain.schema.BaseMemory [Required]#
clear() → None[source]#
Nothing to clear, got a memory like a vault.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Load memory variables from memory.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Nothing should be saved or changed
property memory_variables: List[str]#
Return memory variables.
class langchain.memory.RedisChatMessageHistory(session_id: str, url: str = 'redis://localhost:6379/0', key_prefix: str = 'message_store:', ttl: Optional[int] = None)[source]#
add_message(message: langchain.schema.BaseMessage) → None[source]#
Append the message to the record in Redis
clear() → None[source]#
Clear session memory from Redis
property key: str#
Construct the record key to use
property messages: List[langchain.schema.BaseMessage]#
Retrieve the messages from Redis
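A usage sketch (assumes a Redis server reachable at the default url; the session id and ttl are example values):
from langchain.memory import RedisChatMessageHistory
from langchain.schema import HumanMessage
history = RedisChatMessageHistory(session_id="session-1", ttl=600)
history.add_message(HumanMessage(content="hi!"))
history.messages  # read back from Redis until the ttl expires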
pydantic model langchain.memory.RedisEntityStore[source]#
Redis-backed Entity store. Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
field key_prefix: str = 'memory_store'#
field recall_ttl: Optional[int] = 259200#
field redis_client: Any = None#
field session_id: str = 'default'#
field ttl: Optional[int] = 86400#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
property full_key_prefix: str#
pydantic model langchain.memory.SQLiteEntityStore[source]#
SQLite-backed Entity store
field session_id: str = 'default'#
field table_name: str = 'memory_store'#
clear() → None[source]#
Delete all entities from store.
delete(key: str) → None[source]#
Delete entity value from store.
exists(key: str) → bool[source]#
Check if entity exists in store.
get(key: str, default: Optional[str] = None) → Optional[str][source]#
Get entity value from store.
set(key: str, value: Optional[str]) → None[source]#
Set entity value in store.
property full_table_name: str#
pydantic model langchain.memory.SimpleMemory[source]#
Simple memory for storing context or other bits of information that shouldn't
ever change between prompts.
field memories: Dict[str, Any] = {}#
clear() → None[source]#
Nothing to clear, got a memory like a vault.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return key-value pairs given the text input to the chain.
If None, return all memories
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Nothing should be saved or changed, my memory is set in stone.
property memory_variables: List[str]#
Input keys this memory class will load dynamically.
pydantic model langchain.memory.VectorStoreRetrieverMemory[source]#
Class for a VectorStore-backed memory object.
field input_key: Optional[str] = None#
Key name to index the inputs to load_memory_variables.
field memory_key: str = 'history'#
Key name to locate the memories in the result of load_memory_variables.
field retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]#
VectorStoreRetriever object to connect to.
field return_docs: bool = False#
Whether or not to return the result of querying the database directly.
clear() → None[source]#
Nothing to clear.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Union[List[langchain.schema.Document], str]][source]#
Return history buffer.
save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None[source]#
Save context from this conversation to buffer.
property memory_variables: List[str]#
The list of keys emitted from the load_memory_variables method.
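A usage sketch (FAISS and OpenAIEmbeddings are illustrative choices; any VectorStoreRetriever satisfies the retriever field):
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import VectorStoreRetrieverMemory
from langchain.vectorstores import FAISS
vectorstore = FAISS.from_texts(["seed text"], OpenAIEmbeddings())
memory = VectorStoreRetrieverMemory(retriever=vectorstore.as_retriever(search_kwargs={"k": 1}))
memory.save_context({"input": "My favorite food is pizza"}, {"output": "good to know"})
# retrieval is by semantic similarity, not recency
memory.load_memory_variables({"input": "what do I like to eat?"})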
PromptTemplates#
Prompt template classes.
pydantic model langchain.prompts.BaseChatPromptTemplate[source]#
format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
abstract format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]#
Format kwargs into a list of messages.
format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]#
Create Chat Messages.
pydantic model langchain.prompts.BasePromptTemplate[source]#
Base class for all prompt templates, returning a prompt.
field input_variables: List[str] [Required]#
A list of the names of the variables the prompt template expects.
field output_parser: Optional[langchain.schema.BaseOutputParser] = None#
How to parse the output of calling an LLM on this formatted prompt.
dict(**kwargs: Any) → Dict[source]#
Return dictionary representation of prompt.
abstract format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
abstract format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]#
Create Chat Messages.
partial(**kwargs: Union[str, Callable[[], str]]) → langchain.prompts.base.BasePromptTemplate[source]#
Return a partial of the prompt template.
save(file_path: Union[pathlib.Path, str]) → None[source]#
Save the prompt.
Parameters
file_path – Path to directory to save prompt to.
Example:
.. code-block:: python
prompt.save(file_path="path/prompt.yaml")
property lc_serializable: bool#
Return whether or not the class is serializable.
pydantic model langchain.prompts.ChatPromptTemplate[source]#
format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]#
Format kwargs into a list of messages.
partial(**kwargs: Union[str, Callable[[], str]]) → langchain.prompts.base.BasePromptTemplate[source]#
Return a partial of the prompt template.
save(file_path: Union[pathlib.Path, str]) → None[source]#
Save the prompt.
Parameters
file_path – Path to directory to save prompt to.
Example:
.. code-block:: python
prompt.save(file_path="path/prompt.yaml")
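A usage sketch built with the message prompt templates exported from langchain.prompts (the role/question variables are arbitrary examples):
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template("You are a {role}."),
    HumanMessagePromptTemplate.from_template("{question}"),
])
messages = prompt.format_messages(role="helpful assistant", question="What is LangChain?")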
pydantic model langchain.prompts.FewShotPromptTemplate[source]#
Prompt template that contains few shot examples.
field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]#
PromptTemplate used to format an individual example.
field example_selector: Optional[langchain.prompts.example_selector.base.BaseExampleSelector] = None#
ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided.
field example_separator: str = '\n\n'#
String separator used to join the prefix, the examples, and suffix.
field examples: Optional[List[dict]] = None#
Examples to format into the prompt.
Either this or example_selector should be provided.
field input_variables: List[str] [Required]#
A list of the names of the variables the prompt template expects.
field prefix: str = ''#
A prompt template string to put before the examples.
field suffix: str [Required]#
A prompt template string to put after the examples.
field template_format: str = 'f-string'#
The format of the prompt template. Options are: 'f-string', 'jinja2'.
field validate_template: bool = True#
Whether or not to try validating the template.
dict(**kwargs: Any) → Dict[source]#
Return a dictionary of the prompt.
format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
property lc_serializable: bool#
Return whether or not the class is serializable.
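A usage sketch exercising the fields above (the antonym task is an arbitrary example):
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
example_prompt = PromptTemplate(input_variables=["word", "antonym"], template="Word: {word}\nAntonym: {antonym}")
prompt = FewShotPromptTemplate(
    examples=[{"word": "happy", "antonym": "sad"}, {"word": "tall", "antonym": "short"}],
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)
prompt.format(input="big")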
pydantic model langchain.prompts.FewShotPromptWithTemplates[source]#
Prompt template that contains few shot examples.
field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]#
PromptTemplate used to format an individual example.
field example_selector: Optional[langchain.prompts.example_selector.base.BaseExampleSelector] = None#
ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided.
field example_separator: str = '\n\n'#
String separator used to join the prefix, the examples, and suffix.
field examples: Optional[List[dict]] = None#
Examples to format into the prompt.
Either this or example_selector should be provided.
field input_variables: List[str] [Required]#
A list of the names of the variables the prompt template expects.
field prefix: Optional[langchain.prompts.base.StringPromptTemplate] = None#
A PromptTemplate to put before the examples.
field suffix: langchain.prompts.base.StringPromptTemplate [Required]#
A PromptTemplate to put after the examples.
field template_format: str = 'f-string'#
The format of the prompt template. Options are: 'f-string', 'jinja2'.
field validate_template: bool = True#
Whether or not to try validating the template.
dict(**kwargs: Any) → Dict[source]#
Return a dictionary of the prompt.
format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
pydantic model langchain.prompts.MessagesPlaceholder[source]#
Prompt template that assumes variable is already list of messages.
format_messages(**kwargs: Any) → List[langchain.schema.BaseMessage][source]#
To a BaseMessage.
property input_variables: List[str]#
Input variables for this prompt template.
langchain.prompts.Prompt#
alias of langchain.prompts.prompt.PromptTemplate
pydantic model langchain.prompts.PromptTemplate[source]#
Schema to represent a prompt for an LLM.
Example
from langchain import PromptTemplate
prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}")
field input_variables: List[str] [Required]#
A list of the names of the variables the prompt template expects.
field template: str [Required]#
The prompt template.
field template_format: str = 'f-string'#
The format of the prompt template. Options are: 'f-string', 'jinja2'.
field validate_template: bool = True#
Whether or not to try validating the template.
format(**kwargs: Any) → str[source]#
Format the prompt with the inputs.
Parameters
kwargs – Any arguments to be passed to the prompt template.
Returns
A formatted string.
Example:
prompt.format(variable1="foo")
classmethod from_examples(examples: List[str], suffix: str, input_variables: List[str], example_separator: str = '\n\n', prefix: str = '', **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]#
Take examples in list format with prefix and suffix to create a prompt.
Intended to be used as a way to dynamically create a prompt from examples.
Parameters
examples – List of examples to use in the prompt.
suffix – String to go after the list of examples. Should generally
set up the user's input.
input_variables – A list of variable names the final prompt template
will expect.
example_separator – The separator to use in between examples. Defaults
to two new line characters.
prefix – String that should go before any examples. Generally includes
examples. Defaults to an empty string.
Returns
The final prompt generated.
classmethod from_file(template_file: Union[str, pathlib.Path], input_variables: List[str], **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]#
Load a prompt from a file.
Parameters
template_file – The path to the file containing the prompt template.
input_variables – A list of variable names the final prompt template
will expect.
Returns
The prompt loaded from the file.
classmethod from_template(template: str, **kwargs: Any) → langchain.prompts.prompt.PromptTemplate[source]#
Load a prompt template from a template.
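Input variables are inferred from the template string, for example:
prompt = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
prompt.format(adjective="funny", content="chickens")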
property lc_attributes: Dict[str, Any]#
Return a list of attribute names that should be included in the
serialized kwargs. These attributes must be accepted by the
constructor.
pydantic model langchain.prompts.StringPromptTemplate[source]#
String prompt should expose the format method, returning a prompt.
format_prompt(**kwargs: Any) → langchain.schema.PromptValue[source]#
Create Chat Messages.
langchain.prompts.load_prompt(path: Union[str, pathlib.Path]) → langchain.prompts.base.BasePromptTemplate[source]#
Unified method for loading a prompt from LangChainHub or local fs.
Output Parsers#
pydantic model langchain.output_parsers.CommaSeparatedListOutputParser[source]#
Parse out comma separated lists.
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(text: str) → List[str][source]#
Parse the output of an LLM call.
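For example:
from langchain.output_parsers import CommaSeparatedListOutputParser
parser = CommaSeparatedListOutputParser()
parser.get_format_instructions()  # text to append to your prompt
parser.parse("red, green, blue")  # ['red', 'green', 'blue']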
pydantic model langchain.output_parsers.DatetimeOutputParser[source]#
field format: str = '%Y-%m-%dT%H:%M:%S.%fZ'#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(response: str) → datetime.datetime[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
pydantic model langchain.output_parsers.GuardrailsOutputParser[source]#
field guard: Any = None#
classmethod from_rail(rail_file: str, num_reasks: int = 1) → langchain.output_parsers.rail_parser.GuardrailsOutputParser[source]#
classmethod from_rail_string(rail_str: str, num_reasks: int = 1) → langchain.output_parsers.rail_parser.GuardrailsOutputParser[source]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(text: str) → Dict[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
pydantic model langchain.output_parsers.ListOutputParser[source]#
Class to parse the output of an LLM call to a list.
abstract parse(text: str) → List[str][source]#
Parse the output of an LLM call.
pydantic model langchain.output_parsers.OutputFixingParser[source]#
Wraps a parser and tries to fix parsing errors.
field parser: BaseOutputParser[T] [Required]#
field retry_chain: LLMChain [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.fix.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'instructions'], output_parser=None, partial_variables={}, template='Instructions:\n--------------\n{instructions}\n--------------\nCompletion:\n--------------\n{completion}\n--------------\n\nAbove, the Completion did not satisfy the constraints given in the Instructions.\nError:\n--------------\n{error}\n--------------\n\nPlease try again. Please only respond with an answer that satisfies the constraints laid out in the Instructions:', template_format='f-string', validate_template=True)) → langchain.output_parsers.fix.OutputFixingParser[langchain.output_parsers.fix.T][source]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(completion: str) → langchain.output_parsers.fix.T[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
pydantic model langchain.output_parsers.PydanticOutputParser[source]#
field pydantic_object: Type[langchain.output_parsers.pydantic.T] [Required]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(text: str) → langchain.output_parsers.pydantic.T[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
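A usage sketch (the Joke model is an arbitrary example schema):
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
parser = PydanticOutputParser(pydantic_object=Joke)
parser.get_format_instructions()  # JSON-schema instructions to embed in the prompt
parser.parse('{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}')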
pydantic model langchain.output_parsers.RegexDictParser[source]#
Class to parse the output into a dictionary.
field no_update_value: Optional[str] = None#
field output_key_to_format: Dict[str, str] [Required]#
field regex_pattern: str = "{}:\\s?([^.'\\n']*)\\.?"#
parse(text: str) → Dict[str, str][source]#
Parse the output of an LLM call.
pydantic model langchain.output_parsers.RegexParser[source]#
Class to parse the output into a dictionary.
field default_output_key: Optional[str] = None#
field output_keys: List[str] [Required]#
field regex: str [Required]#
parse(text: str) → Dict[str, str][source]#
Parse the output of an LLM call.
pydantic model langchain.output_parsers.ResponseSchema[source]#
field description: str [Required]#
field name: str [Required]#
field type: str = 'string'#
pydantic model langchain.output_parsers.RetryOutputParser[source]#
Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt and the completion to another
LLM, and telling it the completion did not satisfy criteria in the prompt.
field parser: BaseOutputParser[T] [Required]#
field retry_chain: LLMChain [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nPlease try again:', template_format='f-string', validate_template=True)) → langchain.output_parsers.retry.RetryOutputParser[langchain.output_parsers.retry.T][source]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(completion: str) → langchain.output_parsers.retry.T[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
parse_with_prompt(completion: str, prompt_value: langchain.schema.PromptValue) → langchain.output_parsers.retry.T[source]#
Optional method to parse the output of an LLM call with a prompt.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Parameters
completion – output of language model
prompt – prompt value
Returns
structured output
pydantic model langchain.output_parsers.RetryWithErrorOutputParser[source]#
Wraps a parser and tries to fix parsing errors.
Does this by passing the original prompt, the completion, AND the error
that was raised to another language model and telling it that the completion
did not work, and raised the given error. Differs from RetryOutputParser
in that this implementation provides the error that was raised back to the
LLM, which in theory should give it more information on how to fix it.
field parser: BaseOutputParser[T] [Required]#
field retry_chain: LLMChain [Required]#
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, parser: langchain.schema.BaseOutputParser[langchain.output_parsers.retry.T], prompt: langchain.prompts.base.BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nDetails: {error}\nPlease try again:', template_format='f-string', validate_template=True)) → langchain.output_parsers.retry.RetryWithErrorOutputParser[langchain.output_parsers.retry.T][source]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(completion: str) → langchain.output_parsers.retry.T[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
parse_with_prompt(completion: str, prompt_value: langchain.schema.PromptValue) → langchain.output_parsers.retry.T[source]#
Optional method to parse the output of an LLM call with a prompt.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Parameters
completion – output of language model
prompt – prompt value
Returns
structured output
pydantic model langchain.output_parsers.StructuredOutputParser[source]#
field response_schemas: List[ResponseSchema] [Required]#
classmethod from_response_schemas(response_schemas: List[langchain.output_parsers.structured.ResponseSchema]) → langchain.output_parsers.structured.StructuredOutputParser[source]#
get_format_instructions() → str[source]#
Instructions on how the LLM output should be formatted.
parse(text: str) → Any[source]#
Parse the output of an LLM call.
A method which takes in a string (assumed output of a language model)
and parses it into some structure.
Parameters
text – output of language model
Returns
structured output
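A usage sketch (the schema names and sample completion are arbitrary; parse expects the json markdown snippet that the format instructions ask the model to produce):
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(name="source", description="source used to answer the question"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)
parser.get_format_instructions()
parser.parse('```json\n{"answer": "Paris", "source": "geography"}\n```')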
SearxNG Search#
Utility for using SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
multiple search engines and databases and
supports the OpenSearch
specification.
More details on the installation instructions here.
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start#
In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter searx_host
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the search instance to query the searx API.
Searching#
Use the run() and
results() methods to query the searx API.
Other methods are available for convenience.
SearxResults is a convenience wrapper around the raw json result.
Example usage of the run method to make a search:
s.run(query="what is the best search engine?")
Engine Parameters#
You can pass any accepted searx search API parameters to the
SearxSearchWrapper instance.
In the following example we are using the
engines and the language parameters:
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips#
Searx offers a special
search syntax
that can also be used instead of passing engine parameters.
For example the following query:
s = SearxSearchWrapper("langchain library", engines=['github'])
# can also be written as:
s = SearxSearchWrapper("langchain library !github")
# or even:
s = SearxSearchWrapper("langchain library !gh")
In some situations you might want to pass an extra string to the search query.
For example when the run() method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
# select the github engine and pass the search suffix
s = SearxSearchWrapper("langchain library", query_suffix="!gh")
s = SearxSearchWrapper("langchain library")
# select github using the conventional google search syntax
s.run("large language models", query_suffix="site:github.com")
NOTE: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
See SearxNG Configured Engines and
SearxNG Search Syntax
for more details.
Notes
This wrapper is based on the SearxNG fork searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public searxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described here.
For a list of public SearxNG instances see https://searx.space/
class langchain.utilities.searx_search.SearxResults(data: str)[source]#
Dict like wrapper around search api results.
property answers: Any#
Helper accessor on the json result.
pydantic model langchain.utilities.searx_search.SearxSearchWrapper[source]#
Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
searx_host or exporting the environment variable SEARX_HOST.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
unsecure. You can also pass the host url scheme as http to disable SSL.
Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
Validators
disable_ssl_warnings » unsecure
validate_params » all fields
field aiosession: Optional[Any] = None#
field categories: Optional[List[str]] = []#
field engines: Optional[List[str]] = []#
field headers: Optional[dict] = None#
field k: int = 10#
field params: dict [Optional]#
field query_suffix: Optional[str] = ''# | rtdocs_stable/api.python.langchain.com/en/stable/reference/modules/searx_search.html |
940630d84aef-3 | field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
field searx_host: str = ''#
field unsecure: bool = False#
async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Asynchronously query with json results.
Uses aiohttp. See results for more info.
async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Asynchronous version of run.
results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Run query through Searx API and returns the results with metadata.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
num_results – Limit the number of results to return.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
A list of dicts with the following keys:
snippet – The description of the result.
title – The title of the result.
link – The link to the result.
engines – The engines used for the result.
category – Searx category of the result.
run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
The result of the query.
Return type
str
Raises
ValueError – If an error occurred with the query.
Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
Text Splitter#
Functionality for splitting text.
class langchain.text_splitter.CharacterTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]#
Implementation of splitting text that looks at characters.
split_text(text: str) → List[str][source]#
Split incoming text and return chunks.
class langchain.text_splitter.HeaderType[source]#
data: str#
level: int#
name: str#
class langchain.text_splitter.Language(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]#
CPP = 'cpp'#
GO = 'go'#
HTML = 'html'#
JAVA = 'java'#
JS = 'js'#
LATEX = 'latex'#
MARKDOWN = 'markdown'#
PHP = 'php'#
PROTO = 'proto'#
PYTHON = 'python'#
RST = 'rst'#
RUBY = 'ruby'#
RUST = 'rust'#
SCALA = 'scala'#
SOL = 'sol'#
SWIFT = 'swift'#
class langchain.text_splitter.LatexTextSplitter(**kwargs: Any)[source]#
Attempts to split the text along Latex-formatted layout elements.
class langchain.text_splitter.LineType[source]#
content: str#
metadata: Dict[str, str]#
class langchain.text_splitter.MarkdownHeaderTextSplitter(headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False)[source]#
Implementation of splitting markdown files based on specified headers.
aggregate_lines_to_chunks(lines: List[langchain.text_splitter.LineType]) → List[langchain.text_splitter.LineType][source]#
Combine lines with common metadata into chunks
Parameters
lines – Line of text / associated header metadata
split_text(text: str) → List[langchain.text_splitter.LineType][source]#
Split markdown file
Parameters
text – Markdown file
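A usage sketch (the markdown string is an arbitrary example; each returned LineType carries the content plus the headers it sits under):
from langchain.text_splitter import MarkdownHeaderTextSplitter
md = "# Title\n\nIntro text.\n\n## Section\n\nSection text."
splitter = MarkdownHeaderTextSplitter(headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")])
for chunk in splitter.split_text(md):
    print(chunk["content"], chunk["metadata"])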
class langchain.text_splitter.MarkdownTextSplitter(**kwargs: Any)[source]#
Attempts to split the text along Markdown-formatted headings.
class langchain.text_splitter.NLTKTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]#
Implementation of splitting text that looks at sentences using NLTK.
split_text(text: str) → List[str][source]#
Split incoming text and return chunks.
class langchain.text_splitter.PythonCodeTextSplitter(**kwargs: Any)[source]#
Attempts to split the text along Python syntax.
class langchain.text_splitter.RecursiveCharacterTextSplitter(separators: Optional[List[str]] = None, keep_separator: bool = True, **kwargs: Any)[source]#
Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
classmethod from_language(language: langchain.text_splitter.Language, **kwargs: Any) → langchain.text_splitter.RecursiveCharacterTextSplitter[source]#
static get_separators_for_language(language: langchain.text_splitter.Language) → List[str][source]#
split_text(text: str) → List[str][source]#
Split text into multiple components.
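A usage sketch (the chunk sizes are arbitrary; chunk_size and chunk_overlap come from the TextSplitter base class below):
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = splitter.split_text("some long document text " * 40)
# language-aware separators, e.g. for splitting Python source:
py_splitter = RecursiveCharacterTextSplitter.from_language(Language.PYTHON, chunk_size=200)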
class langchain.text_splitter.SentenceTransformersTokenTextSplitter(chunk_overlap: int = 50, model_name: str = 'sentence-transformers/all-mpnet-base-v2', tokens_per_chunk: Optional[int] = None, **kwargs: Any)[source]#
Implementation of splitting text that looks at tokens.
count_tokens(*, text: str) → int[source]#
split_text(text: str) → List[str][source]#
Split text into multiple components.
class langchain.text_splitter.SpacyTextSplitter(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any)[source]#
Implementation of splitting text that looks at sentences using Spacy.
split_text(text: str) → List[str][source]#
Split incoming text and return chunks.
class langchain.text_splitter.TextSplitter(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: typing.Callable[[str], int] = <built-in function len>, keep_separator: bool = False, add_start_index: bool = False)[source]#
Interface for splitting text into chunks.
async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Asynchronously transform a sequence of documents by splitting them.
create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[langchain.schema.Document][source]#
Create documents from a list of texts.
classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → langchain.text_splitter.TextSplitter[source]#
Text splitter that uses HuggingFace tokenizer to count length.
classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → langchain.text_splitter.TS[source]#
Text splitter that uses tiktoken encoder to count length.
split_documents(documents: Iterable[langchain.schema.Document]) → List[langchain.schema.Document][source]#
Split documents.
abstract split_text(text: str) → List[str][source]#
Split text into multiple components.
transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Transform sequence of documents by splitting them.
class langchain.text_splitter.TokenTextSplitter(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any)[source]#
Implementation of splitting text that looks at tokens.
split_text(text: str) → List[str][source]#
Split text into multiple components.
class langchain.text_splitter.Tokenizer(chunk_overlap: 'int', tokens_per_chunk: 'int', decode: 'Callable[[list[int]], str]', encode: 'Callable[[str], List[int]]')[source]#
chunk_overlap: int#
decode: Callable[[list[int]], str]#
encode: Callable[[str], List[int]]#
tokens_per_chunk: int#
langchain.text_splitter.split_text_on_tokens(*, text: str, tokenizer: langchain.text_splitter.Tokenizer) → List[str][source]#
Split incoming text and return chunks.
Evaluation#
This section of documentation covers how we approach and think about evaluation in LangChain:
both evaluation of internal chains/agents, and how we would recommend that people building on top of LangChain approach evaluation.
The Problem#
It can be really hard to evaluate LangChain chains and agents.
There are two main reasons for this:
# 1: Lack of data
You generally don't have a ton of data to evaluate your chains/agents over before starting a project.
This is usually because Large Language Models (the core of most chains/agents) are terrific few-shot and zero-shot learners,
meaning you are almost always able to get started on a particular task (text-to-SQL, question answering, etc) without
a large dataset of examples.
This is in stark contrast to traditional machine learning where you had to first collect a bunch of datapoints
before even getting started using a model.
# 2: Lack of metrics
Most chains/agents are performing tasks for which there are not very good metrics to evaluate performance.
For example, one of the most common use cases is generating text of some form.
Evaluating generated text is much more complicated than evaluating a classification prediction, or a numeric prediction.
The Solution#
LangChain attempts to tackle both of those issues.
What we have so far are initial passes at solutions - we do not think we have a perfect solution.
So we very much welcome feedback, contributions, integrations, and thoughts on this.
Here is what we have for each problem so far:
# 1: Lack of data
We have started LangChainDatasets, a Community space on Hugging Face.
We intend this to be a collection of open source datasets for evaluating common chains and agents.
We have contributed five datasets of our own to start, but we fully intend this to be a community effort.
In order to contribute a dataset, you simply need to join the community and then you will be able to upload datasets.
We're also aiming to make it as easy as possible for people to create their own datasets.
As a first pass at this, we've added a QAGenerationChain, which, given a document, comes up
with question-answer pairs that can be used to evaluate question-answering tasks over that document down the line.
See this notebook for an example of how to use this chain.
# 2: Lack of metrics
We have two solutions to the lack of metrics.
The first solution is to use no metrics, and rather just rely on looking at results by eye to get a sense for how the chain/agent is performing.
To assist in this, we have developed (and will continue to develop) tracing, a UI-based visualizer of your chain and agent runs.
The second solution we recommend is to use Language Models themselves to evaluate outputs.
For this we have a few different chains and prompts aimed at tackling this issue.
The Examples#
We have created a bunch of examples combining the above two solutions to show how we internally evaluate chains and agents when we are developing.
In addition to the examples we've curated, we also highly welcome contributions here.
To facilitate that, we've included a template notebook for community members to use to build their own examples.
The existing examples we have are:
Question Answering (State of Union): A notebook showing evaluation of a question-answering task over a State-of-the-Union address.
Question Answering (Paul Graham Essay): A notebook showing evaluation of a question-answering task over a Paul Graham essay.
SQL Question Answering (Chinook): A notebook showing evaluation of a question-answering task over a SQL database (the Chinook database).
Agent Vectorstore: A notebook showing evaluation of an agent doing question answering while routing between two different vector databases.
Agent Search + Calculator: A notebook showing evaluation of an agent doing question answering using a Search engine and a Calculator as tools.
Evaluating an OpenAPI Chain: A notebook showing evaluation of an OpenAPI chain, including how to generate test data if you don't have any.
Other Examples#
In addition, we also have some more generic resources for evaluation.
Question Answering: An overview of LLMs aimed at evaluating question answering systems in general.
Data Augmented Question Answering: An end-to-end example of evaluating a question answering system focused on a specific document (a RetrievalQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
Hugging Face Datasets: Covers an example of loading and using a dataset from Hugging Face for evaluation.
Agents#
Conceptual Guide
Agents can be used for a variety of tasks.
Agents combine the decision-making ability of a language model with tools in order to create a system
that can execute solutions on your behalf. Before reading any further, it is highly
recommended that you read the documentation in the agent module to better understand the concepts associated with agents.
Specifically, you should be familiar with the agent, tool, and agent executor abstractions before reading more.
Agent Documentation (for interacting with the outside world)
Create Your Own Agent#
Once you have read that documentation, you should be prepared to create your own agent.
What exactly does that involve?
Here's how we recommend getting started with creating your own agent:
Step 1: Create Tools#
Agents are largely defined by the tools they can use.
If you have a specific task you want the agent to accomplish, you have to give it access to the right tools.
We have many tools natively in LangChain, so you should first look to see if any of them meet your needs.
But we also make it easy to define a custom tool, so if you need custom tools you should absolutely do that.
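For instance, a custom tool can be as simple as a Python function wrapped in a Tool. A minimal sketch (the tool itself is made up for illustration):

from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI

# A trivial custom tool: any function mapping a string to a string will do.
def word_count(text: str) -> str:
    return str(len(text.split()))

tools = [
    Tool(
        name="WordCounter",
        func=word_count,
        description="Useful for counting the number of words in a piece of text.",
    )
]

agent = initialize_agent(
    tools,
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("How many words are in the sentence 'to be or not to be'?")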
(Optional) Step 2: Modify Agent#
The built-in LangChain agent types are designed to work well in generic situations,
but you may be able to improve performance by modifying the agent implementation.
There are several ways you could do this:
Modify the base prompt. This can be used to give the agent more context on how it should behave, etc.
Modify the output parser. This is necessary if the agent is having trouble parsing the language model output. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/personal_assistants.html |
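As a sketch of the first option, many built-in agent types accept prompt overrides through agent_kwargs (the prefix below is purely illustrative):

from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI

tools = [Tool(name="Echo", func=lambda s: s, description="Repeats the input back.")]

# agent_kwargs lets you override parts of the built-in prompt.
agent = initialize_agent(
    tools,
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    agent_kwargs={"prefix": "You are a terse assistant. Answer as briefly as possible."},
)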
(Optional) Step 3: Modify Agent Executor#
This step is usually not necessary, as this is pretty general logic.
Possible reasons you would want to modify this include adding different stopping conditions or handling errors.
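As a sketch, extra keyword arguments to initialize_agent are generally forwarded to the underlying AgentExecutor, which is where stopping conditions and error handling live (exact argument support may vary by version):

from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI

tools = [Tool(name="Echo", func=lambda s: s, description="Repeats the input back.")]

agent = initialize_agent(
    tools,
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    max_iterations=3,                  # stop after at most 3 intermediate steps
    early_stopping_method="generate",  # ask the LLM for a final answer when stopping
    handle_parsing_errors=True,        # recover from unparseable LLM output
)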
Examples#
Specific examples of agents include:
AI Plugins: an implementation of an agent that is designed to be able to use all AI Plugins.
Plug-and-PlAI (Plugins Database): an implementation of an agent that is designed to be able to use all AI Plugins retrieved from PlugNPlAI.
Wikibase Agent: an implementation of an agent that is designed to interact with Wikibase.
SalesGPT: an implementation of a context-aware AI sales agent.
Multi-Modal Output Agent: an implementation of a multi-modal output agent that can generate text and images.
Code Understanding#
Overview
LangChain can be used to parse and analyze GitHub code repositories. By leveraging VectorStores, Conversational RetrieverChain, and GPT-4, it can answer questions in the context of an entire GitHub repository or generate new code. This page outlines the essential components of the system and shows how to use LangChain for better code comprehension, contextual question answering, and code generation in GitHub repositories.
Conversational Retriever Chain#
Conversational RetrieverChain is a retrieval-focused system that interacts with the data stored in a VectorStore. Utilizing advanced techniques, like context-aware filtering and ranking, it retrieves the most relevant code snippets and information for a given user query. Conversational RetrieverChain is engineered to deliver high-quality, pertinent results while considering conversation history and context.
LangChain Workflow for Code Understanding and Generation
Index the code base: Clone the target repository, load all files within, chunk the files, and execute the indexing process. Optionally, you can skip this step and use an already indexed dataset.
Embedding and Code Store: Code snippets are embedded using a code-aware embedding model and stored in a VectorStore.
Query Understanding: GPT-4 processes user queries, grasping the context and extracting relevant details.
Construct the Retriever: Conversational RetrieverChain searches the VectorStore to identify the most relevant code snippets for a given query.
Build the Conversational Chain: Customize the retriever settings and define any user-defined filters as needed.
Ask questions: Define a list of questions to ask about the codebase, and then use the ConversationalRetrievalChain to generate context-aware answers. The LLM (GPT-4) generates comprehensive, context-aware answers based on retrieved code snippets and conversation history.
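A condensed sketch of this workflow, assuming the repository has already been chunked, embedded, and stored in a Deep Lake dataset (the dataset path and question are hypothetical):

from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import DeepLake

# Connect to an existing, pre-indexed dataset of code chunks.
db = DeepLake(
    dataset_path="hub://your-org/your-repo-code",  # hypothetical dataset path
    embedding_function=OpenAIEmbeddings(),
    read_only=True,
)
retriever = db.as_retriever()

qa = ConversationalRetrievalChain.from_llm(
    ChatOpenAI(model_name="gpt-4"), retriever=retriever
)

chat_history = []
question = "What does the main indexing function do?"
result = qa({"question": question, "chat_history": chat_history})
print(result["answer"])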
The full tutorial is available below. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/code.html |
Twitter the-algorithm codebase analysis with Deep Lake: A notebook walking through how to parse GitHub source code and run conversational queries over it.
LangChain codebase analysis with Deep Lake: A notebook walking through how to analyze and do question answering over the LangChain codebase itself.
Extraction#
Conceptual Guide
Most APIs and databases still deal with structured information.
Therefore, in order to better work with those, it can be useful to extract structured information from text.
Examples of this include:
Extracting a structured row to insert into a database from a sentence
Extracting multiple rows to insert into a database from a long document
Extracting the correct API parameters from a user query
This work is closely related to output parsing.
Output parsers are responsible for instructing the LLM to respond in a specific format.
In this case, the output parsers specify the format of the data you would like to extract from the document.
Then, in addition to the output format instructions, the prompt should also contain the data you would like to extract information from.
While normal output parsers are good enough for basic structuring of response data,
when doing extraction you often want to extract more complicated or nested structures.
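As an illustration, here is a minimal sketch of extracting a structured record with a PydanticOutputParser (the Person schema and input text are made up):

from pydantic import BaseModel, Field
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate

# The schema describing the structure we want to extract.
class Person(BaseModel):
    name: str = Field(description="the person's name")
    age: int = Field(description="the person's age")

parser = PydanticOutputParser(pydantic_object=Person)
prompt = PromptTemplate(
    template="Extract the requested fields from the text.\n{format_instructions}\nText: {text}",
    input_variables=["text"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

llm = OpenAI(temperature=0)
output = llm(prompt.format(text="Alice is a 30-year-old engineer from Berlin."))
person = parser.parse(output)  # -> Person(name='Alice', age=30)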
For a deep dive on extraction, we recommend checking out kor,
a library that uses the existing LangChain chain and OutputParser abstractions
but focuses on enabling extraction of more complicated, nested schemas.
Summarization#
Conceptual Guide
Summarization involves creating a concise summary of one or more longer documents.
This can be useful for distilling long documents into the core pieces of information.
The recommended way to get started using a summarization chain is:
from langchain.llms import OpenAI
from langchain.chains.summarize import load_summarize_chain

llm = OpenAI(temperature=0)  # any LLM can be used here
chain = load_summarize_chain(llm, chain_type="map_reduce")
chain.run(docs)  # docs: a list of Document objects to summarize
The following resources exist:
Summarization Notebook: A notebook walking through how to accomplish this task.
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents).
Autonomous Agents#
Autonomous Agents are agents designed to be longer-running.
You give them one or more long-term goals, and they independently execute towards those goals.
These applications combine tool usage and long-term memory.
At the moment, Autonomous Agents are fairly experimental and based on other open-source projects.
By implementing these open-source projects in LangChain primitives, we get the benefits of LangChain:
easy switching and experimenting with multiple LLMs, usage of different vectorstores as memory,
and usage of LangChain's collection of tools.
Baby AGI (Original Repo)#
Baby AGI: a notebook implementing BabyAGI as LLM Chains
Baby AGI with Tools: building off the above notebook, this example substitutes in an agent with tools as the execution tools, allowing it to actually take actions.
AutoGPT (Original Repo)#
AutoGPT: a notebook implementing AutoGPT in LangChain primitives
WebSearch Research Assistant: a notebook showing how to use AutoGPT plus specific tools to act as a research assistant that can use the web.
MetaPrompt (Original Repo)#
Meta-Prompt: a notebook implementing Meta-Prompt in LangChain primitives
Querying Tabular Data#
Conceptual Guide
Lots of data and information is stored in tabular form, whether in CSVs, Excel sheets, or SQL tables.
This page covers all resources available in LangChain for working with data in this format.
Document Loading#
If you have text data stored in a tabular format, you may want to load it into a Document and then index it as you would
other text/unstructured data. For this, use a document loader like the CSVLoader,
then create an index over that data and query it that way.
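A minimal sketch of that flow (the file path and query are made up):

from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator

# Load each CSV row as a Document and build a vectorstore index over them.
loader = CSVLoader(file_path="customers.csv")  # hypothetical file
index = VectorstoreIndexCreator().from_loaders([loader])
index.query("Which customer is based in Berlin?")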
Querying#
If you have more numeric tabular data, or have a large amount of data and don't want to index it, you should get started
by looking at various chains and agents we have for dealing with this data.
Chains#
If you are just getting started, and you have relatively small/simple tabular data, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
SQL Database Chain
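A minimal sketch of the SQL Database Chain (this assumes a local copy of the Chinook sample database; the URI is illustrative):

from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

# Connect to the database and let the chain translate questions into SQL.
db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # hypothetical local database
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
db_chain.run("How many employees are there?")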
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger databases and more complex schemas.
SQL Agent
Pandas Agent
CSV Agent
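For example, a Pandas Agent can be pointed at a DataFrame (the file and question below are made up):

import pandas as pd
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.read_csv("titanic.csv")  # hypothetical file
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("How many rows are there?")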
Chatbots#
Conceptual Guide
Language models are good at producing text, which makes them ideal for creating chatbots.
Aside from the base prompts/LLMs, an important concept to know for chatbots is memory.
Most chat-based applications rely on remembering what happened in previous interactions, which memory is designed to help with.
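A minimal sketch of a chatbot with memory (the inputs are made up):

from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# The memory object stores previous turns and injects them into each prompt.
conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationBufferMemory(),
)
conversation.predict(input="Hi there! My name is Alice.")
conversation.predict(input="What is my name?")  # memory supplies the earlier turn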
The following resources exist:
ChatGPT Clone: A notebook walking through how to recreate a ChatGPT-like experience with LangChain.
Conversation Memory: A notebook walking through how to use different types of conversational memory.
Conversation Agent: A notebook walking through how to create an agent optimized for conversation.
Additional related resources include:
Memory Key Concepts: Explanation of key concepts related to memory.
Memory Examples: A collection of how-to examples for working with memory.
More end-to-end examples include:
Voice Assistant: A notebook walking through how to create a voice assistant using LangChain.
Interacting with APIs#
Conceptual Guide
Lots of data and information is stored behind APIs.
This page covers all resources available in LangChain for working with APIs.
Chains#
If you are just getting started, and you have relatively simple APIs, you should get started with chains.
Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you
understand what is happening better.
API Chain
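A minimal sketch of the API Chain, using the bundled Open-Meteo API docs (the question is made up):

from langchain.llms import OpenAI
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs

# The chain reads the API docs, constructs a request, and interprets the response.
llm = OpenAI(temperature=0)
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)
chain.run("What is the current temperature in Munich, Germany, in degrees Celsius?")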
Agents#
Agents are more complex, and involve multiple queries to the LLM to understand what to do.
The downside of agents is that you have less control. The upside is that they are more powerful,
which allows you to use them on larger and more complex schemas.
OpenAPI Agent
Agent Simulations#
Agent simulations involve one or more agents interacting with each other.
Agent simulations generally involve two main components:
Long Term Memory
Simulation Environment
Specific implementations of agent simulations (or parts of agent simulations) include:
Simulations with One Agent#
Simulated Environment: Gymnasium: an example of how to create a simple agent-environment interaction loop with Gymnasium (formerly OpenAI Gym).
Simulations with Two Agents#
CAMEL: an implementation of the CAMEL (Communicative Agents for "Mind" Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other.
Two Player D&D: an example of how to use a generic simulator for two agents to implement a variant of the popular Dungeons & Dragons role playing game.
Agent Debates with Tools: an example of how to enable Dialogue Agents to use tools to inform their responses.
Simulations with Multiple Agents#
Multi-Player D&D: an example of how to use a generic dialogue simulator for multiple dialogue agents with a custom speaker-ordering, illustrated with a variant of the popular Dungeons & Dragons role playing game.
Decentralized Speaker Selection: an example of how to implement a multi-agent dialogue without a fixed schedule for who speaks when. Instead the agents decide for themselves who speaks by outputting bids to speak. This example shows how to do this in the context of a fictitious presidential debate.
Authoritarian Speaker Selection: an example of how to implement a multi-agent dialogue where a privileged agent directs who speaks when. This example also showcases how to enable the privileged agent to determine when the conversation terminates, in the context of a fictitious news show.
Simulated Environment: PettingZoo: an example of how to create an agent-environment interaction loop for multiple agents with PettingZoo (a multi-agent version of Gymnasium).
Generative Agents: This notebook implements a generative agent based on the paper Generative Agents: Interactive Simulacra of Human Behavior by Park et al.
Question Answering over Docs#
Conceptual Guide
Question answering in this context refers to question answering over your document data.
For question answering over other types of data, please see other documentation, such as SQL Database Question Answering or Interacting with APIs.
For question answering over many documents, you almost always want to create an index over the data.
This can be used to smartly access the most relevant documents for a given question, allowing you to avoid having to pass all the documents to the LLM (saving you time and money).
See this notebook for a more detailed introduction to this, but for a super quick start the steps involved are:
Load Your Documents
from langchain.document_loaders import TextLoader
loader = TextLoader('../state_of_the_union.txt')
See here for more information on how to get started with document loading.
Create Your Index
from langchain.indexes import VectorstoreIndexCreator
index = VectorstoreIndexCreator().from_loaders([loader])
The best and most popular index by far at the moment is the VectorStore index.
Query Your Index
query = "What did the president say about Ketanji Brown Jackson"
index.query(query)
Alternatively, use query_with_sources to also get back the sources involved
query = "What did the president say about Ketanji Brown Jackson"
index.query_with_sources(query)
Again, these high-level interfaces hide a lot of what is going on under the hood, so please see this notebook for a lower-level walkthrough.
Document Question Answering#
Question answering involves fetching multiple documents, and then asking a question of them.
The LLM response will contain the answer to your question, based on the content of the documents. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/question_answering.html |
The recommended way to get started using a question answering chain is:
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain

llm = OpenAI(temperature=0)  # any LLM can be used here
chain = load_qa_chain(llm, chain_type="stuff")
chain.run(input_documents=docs, question=query)  # docs: a list of Documents; query: your question
The following resources exist:
Question Answering Notebook: A notebook walking through how to accomplish this task.
VectorDB Question Answering Notebook: A notebook walking through how to do question answering over a vector database. This can often be useful for when you have a LOT of documents, and you don't want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Adding in sources#
There is also a variant of this, where in addition to responding with the answer, the language model will also cite its sources (e.g. which of the documents passed in it used).
The recommended way to get started using a question answering with sources chain is:
from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain

llm = OpenAI(temperature=0)  # any LLM can be used here
chain = load_qa_with_sources_chain(llm, chain_type="stuff")
chain({"input_documents": docs, "question": query}, return_only_outputs=True)
The following resources exist:
QA With Sources Notebook: A notebook walking through how to accomplish this task.
VectorDB QA With Sources Notebook: A notebook walking through how to do question answering with sources over a vector database. This can often be useful for when you have a LOT of documents, and you don't want to pass them all to the LLM, but rather first want to do some semantic search over embeddings.
Additional Related Resources#
Additional related resources include:
Utilities for working with Documents: Guides on how to use several of the utilities which will prove helpful for this task, including Text Splitters (for splitting up long documents) and Embeddings & Vectorstores (useful for the above Vector DB example). | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/question_answering.html |
CombineDocuments Chains: A conceptual overview of specific types of chains by which you can accomplish this task.
End-to-end examples#
For examples of this done in an end-to-end manner, please see the following resources:
Semantic search over a group chat with Sources Notebook: A notebook that semantically searches over a group chat conversation.
Voice Assistant#
This chain creates a clone of ChatGPT with a few modifications to make it a voice assistant.
It uses the pyttsx3 and speech_recognition libraries to convert text to speech and speech to text, respectively. The prompt template is also changed to make it more suitable for voice assistant use.
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
{history}
Human: {human_input}
Assistant:"""
prompt = PromptTemplate(
    input_variables=["history", "human_input"],
    template=template,
)
chatgpt_chain = LLMChain(
    llm=OpenAI(temperature=0),
    prompt=prompt,
    verbose=True,
    memory=ConversationBufferWindowMemory(k=2),
)
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
def listen(command_queue):
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Calibrating...')
        r.adjust_for_ambient_noise(source, duration=5)
        # optional parameters to adjust microphone sensitivity
        # r.energy_threshold = 200
        # r.pause_threshold = 0.5
        print('Okay, go!')
        while True:
            text = ''
            print('listening now...')
            try:
                audio = r.listen(source, timeout=5, phrase_time_limit=30)
                print('Recognizing...')
                # whisper model options are found here: https://github.com/openai/whisper#available-models-and-languages
                # other speech recognition models are also available.
                text = r.recognize_whisper(audio, model='medium.en', show_dict=True)['text']
            except Exception as e:
                unrecognized_speech_text = f'Sorry, I didn\'t catch that. Exception was: {e}'
                text = unrecognized_speech_text
            print(text)
            response_text = chatgpt_chain.predict(human_input=text)
            print(response_text)
            engine.say(response_text)
            engine.runAndWait()
listen(None)
Calibrating... | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Okay, go!
listening now...
Recognizing...
C:\Users\jaden\AppData\Roaming\Python\Python310\site-packages\tqdm\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
Hello, Assistant. What's going on?
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Hello, Assistant. What's going on?
Assistant:
> Finished chain.
Hi there! It's great to hear from you. I'm doing well. How can I help you today?
listening now...
Recognizing...
That's cool. Isn't that neat? Yeah, I'm doing great.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Hello, Assistant. What's going on?
AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?
Human: That's cool. Isn't that neat? Yeah, I'm doing great.
Assistant:
> Finished chain.
That's great to hear! What can I do for you today?
listening now...
Recognizing...
Thank you.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Hello, Assistant. What's going on?
AI: Hi there! It's great to hear from you. I'm doing well. How can I help you today?
Human: That's cool. Isn't that neat? Yeah, I'm doing great.
AI: That's great to hear! What can I do for you today?
Human: Thank you.
Assistant:
> Finished chain.
You're welcome! Is there anything else I can help you with?
listening now...
Recognizing...
I'd like to learn more about neural networks.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: That's cool. Isn't that neat? Yeah, I'm doing great.
AI: That's great to hear! What can I do for you today?
Human: Thank you.
AI: You're welcome! Is there anything else I can help you with?
Human: I'd like to learn more about neural networks.
Assistant:
> Finished chain.
Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.
listening now...
Recognizing...
Tell me a fun fact about neural networks.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Thank you.
AI: You're welcome! Is there anything else I can help you with?
Human: I'd like to learn more about neural networks. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.
Human: Tell me a fun fact about neural networks.
Assistant:
> Finished chain.
Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.
listening now...
Recognizing...
Tell me about a brand new discovered bird species.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: I'd like to learn more about neural networks.
AI: Sure! Neural networks are a type of artificial intelligence that use a network of interconnected nodes to process data and make decisions. They are used in a variety of applications, from image recognition to natural language processing. Neural networks are often used to solve complex problems that are too difficult for traditional algorithms.
Human: Tell me a fun fact about neural networks.
AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.
Human: Tell me about a brand new discovered bird species.
Assistant:
> Finished chain.
A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.
listening now...
Recognizing...
Tell me a children's story about the importance of honesty and trust.
> Entering new LLMChain chain...
Prompt after formatting: | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Tell me a fun fact about neural networks.
AI: Neural networks are inspired by the way the human brain works. They are composed of interconnected nodes that process data and make decisions, just like neurons in the brain. Neural networks can learn from their mistakes and improve their performance over time, just like humans do.
Human: Tell me about a brand new discovered bird species. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.
Human: Tell me a children's story about the importance of honesty and trust.
Assistant:
> Finished chain.
Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.
listening now...
Recognizing...
Wow, Assistant, that was a really good story. Congratulations!
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Tell me about a brand new discovered bird species.
AI: A new species of bird was recently discovered in the Amazon rainforest. The species, called the Spix's Macaw, is a small, blue parrot that is believed to be extinct in the wild. It is the first new species of bird to be discovered in the Amazon in over 100 years.
Human: Tell me a children's story about the importance of honesty and trust. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.
Human: Wow, Assistant, that was a really good story. Congratulations!
Assistant:
> Finished chain.
Thank you! I'm glad you enjoyed it.
listening now...
Recognizing...
Thank you.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Tell me a children's story about the importance of honesty and trust.
AI: Once upon a time, there was a young boy named Jack who lived in a small village. Jack was always honest and trustworthy, and his friends and family knew they could always count on him. One day, Jack was walking through the forest when he stumbled upon a magical tree. The tree told Jack that if he was honest and trustworthy, he would be rewarded with a special gift. Jack was so excited, and he promised to always be honest and trustworthy. Sure enough, the tree rewarded Jack with a beautiful golden apple. From that day forward, Jack was always honest and trustworthy, and he was rewarded with many more magical gifts. The moral of the story is that honesty and trust are the most important things in life.
Human: Wow, Assistant, that was a really good story. Congratulations!
AI: Thank you! I'm glad you enjoyed it.
Human: Thank you.
Assistant:
> Finished chain.
You're welcome!
listening now...
Recognizing... | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Wow, Assistant, that was a really good story. Congratulations! | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
AI: Thank you! I'm glad you enjoyed it.
Human: Thank you.
AI: You're welcome!
Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?
Assistant:
> Finished chain.
Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.
listening now...
Recognizing...
Our whole process of awesome is free.
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Thank you.
AI: You're welcome!
Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?
AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.
Human: Our whole process of awesome is free.
Assistant:
> Finished chain.
That's great! It's always nice to have access to free tools and resources.
listening now...
Recognizing...
No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?
> Entering new LLMChain chain...
Prompt after formatting:
Assistant is a large language model trained by OpenAI. | rtdocs_stable/api.python.langchain.com/en/stable/use_cases/chatbots/voice_assistant.html |
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
Assistant is aware that human input is being transcribed from audio and as such there may be some errors in the transcription. It will attempt to account for some words being swapped with similar-sounding words or phrases. Assistant will also keep responses concise, because human attention spans are more limited over the audio channel since it takes time to listen to a response.
Human: Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way? Do you know of online brands like Photoshop and Freq that you don't have to download in some sort of way?
AI: Yes, there are several online brands that offer photo editing and other creative tools without the need to download any software. Adobe Photoshop Express, Pixlr, and Fotor are some of the most popular online photo editing tools. Freq is an online music production platform that allows users to create and share music without downloading any software.
Human: Our whole process of awesome is free.
AI: That's great! It's always nice to have access to free tools and resources.
Human: No, I meant to ask, are those options that you mentioned free? No, I meant to ask, are those options that you mentioned free?
Assistant:
> Finished chain.
Yes, the online brands I mentioned are all free to use. Adobe Photoshop Express, Pixlr, and Fotor are all free to use, and Freq is a free music production platform.
listening now...
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
Cell In[6], line 1
----> 1 listen(None)
Cell In[5], line 20, in listen(command_queue)
18 print('listening now...')
19 try:
---> 20 audio = r.listen(source, timeout=5, phrase_time_limit=30)
21 # audio = r.record(source,duration = 5)
22 print('Recognizing...')
File c:\ProgramData\miniconda3\envs\lang\lib\site-packages\speech_recognition\__init__.py:523, in Recognizer.listen(self, source, timeout, phrase_time_limit, snowboy_configuration)
520 if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
521 break
---> 523 buffer = source.stream.read(source.CHUNK)
524 if len(buffer) == 0: break # reached end of the stream
525 frames.append(buffer)
File c:\ProgramData\miniconda3\envs\lang\lib\site-packages\speech_recognition\__init__.py:199, in Microphone.MicrophoneStream.read(self, size)
198 def read(self, size):
--> 199 return self.pyaudio_stream.read(size, exception_on_overflow=False)
File c:\ProgramData\miniconda3\envs\lang\lib\site-packages\pyaudio\__init__.py:570, in PyAudio.Stream.read(self, num_frames, exception_on_overflow)
567 if not self._is_input:
568 raise IOError("Not input stream",
569 paCanNotReadFromAnOutputOnlyStream)
--> 570 return pa.read_stream(self._stream, num_frames,
571 exception_on_overflow)
KeyboardInterrupt:
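The traceback above is just the listening loop being stopped manually. As a minimal sketch (assuming the listen function defined earlier in this notebook), the interrupt can be caught for a cleaner exit:
try:
    listen(None)  # blocks until interrupted
except KeyboardInterrupt:
    print("stopped listening.")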
Custom Agent with PlugIn Retrieval
Contents
Set up environment
Setup LLM
Set up plugins
Tool Retriever
Prompt Template
Output Parser
Set up LLM, stop sequence, and the agent
Use the Agent
Custom Agent with PlugIn Retrieval#
This notebook combines two concepts in order to build a custom agent that can interact with AI Plugins:
Custom Agent with Retrieval: This introduces the concept of retrieving many tools, which is useful when trying to work with arbitrarily many plugins.
Natural Language API Chains: This creates Natural Language wrappers around OpenAPI endpoints. This is useful because (1) plugins use OpenAPI endpoints under the hood, and (2) wrapping them in an NLAChain allows the router agent to call them more easily.
The novel idea introduced in this notebook is the use of retrieval to select not the tools explicitly, but the set of OpenAPI specs to use. We can then generate tools from those OpenAPI specs. The use case for this is when trying to get agents to use plugins. It may be more efficient to choose plugins first and then endpoints, rather than choosing endpoints directly, because the plugin descriptions may contain more useful information for selection.
Set up environment#
Do necessary imports, etc.
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, SerpAPIWrapper, LLMChain
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
from langchain.agents.agent_toolkits import NLAToolkit
from langchain.tools.plugin import AIPlugin
import re
Setup LLM#
llm = OpenAI(temperature=0)
Set up plugins#
Load and index plugins
urls = [
"https://datasette.io/.well-known/ai-plugin.json",
"https://api.speak.com/.well-known/ai-plugin.json",
"https://www.wolframalpha.com/.well-known/ai-plugin.json",
"https://www.zapier.com/.well-known/ai-plugin.json",
"https://www.klarna.com/.well-known/ai-plugin.json",
"https://www.joinmilo.com/.well-known/ai-plugin.json",
"https://slack.com/.well-known/ai-plugin.json",
"https://schooldigger.com/.well-known/ai-plugin.json",
]
AI_PLUGINS = [AIPlugin.from_url(url) for url in urls]
Tool Retriever#
We will use a vectorstore to create embeddings for each tool description. Then, for an incoming query we can create embeddings for that query and do a similarity search for relevant tools.
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
embeddings = OpenAIEmbeddings()
docs = [
Document(page_content=plugin.description_for_model,
metadata={"plugin_name": plugin.name_for_model}
)
for plugin in AI_PLUGINS
]
vector_store = FAISS.from_documents(docs, embeddings)
toolkits_dict = {plugin.name_for_model:
NLAToolkit.from_llm_and_ai_plugin(llm, plugin)
for plugin in AI_PLUGINS}
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.2 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load a Swagger 2.0 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
retriever = vector_store.as_retriever()
def get_tools(query):
# Get documents, which contain the Plugins to use
docs = retriever.get_relevant_documents(query)
# Get the toolkits, one for each plugin
tool_kits = [toolkits_dict[d.metadata["plugin_name"]] for d in docs]
# Get the tools: a separate NLAChain for each endpoint
tools = []
for tk in tool_kits:
tools.extend(tk.nla_tools)
return tools
We can now test this retriever to see if it seems to work.
tools = get_tools("What could I do today with my kiddo")
[t.name for t in tools]
['Milo.askMilo',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',
'SchoolDigger_API_V2.0.Autocomplete_GetSchools',
'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',
'SchoolDigger_API_V2.0.Districts_GetDistrict2',
'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',
'SchoolDigger_API_V2.0.Rankings_GetRank_District',
'SchoolDigger_API_V2.0.Schools_GetAllSchools20',
'SchoolDigger_API_V2.0.Schools_GetSchool20',
'Speak.translate',
'Speak.explainPhrase',
'Speak.explainTask']
tools = get_tools("what shirts can i buy?")
[t.name for t in tools]
['Open_AI_Klarna_product_Api.productsUsingGET',
'Milo.askMilo',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',
'SchoolDigger_API_V2.0.Autocomplete_GetSchools',
'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',
'SchoolDigger_API_V2.0.Districts_GetDistrict2',
'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',
'SchoolDigger_API_V2.0.Rankings_GetRank_District',
'SchoolDigger_API_V2.0.Schools_GetAllSchools20',
'SchoolDigger_API_V2.0.Schools_GetSchool20']
Prompt Template#
The prompt template is pretty standard, because we're not actually changing much logic in the prompt template itself; rather, we are just changing how retrieval is done.
# Set up the base template
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
The custom prompt template now has the concept of a tools_getter, which we call on the input to select the tools to use
from typing import Callable
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
############## NEW ######################
# The list of tools available
tools_getter: Callable
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
############## NEW ######################
tools = self.tools_getter(kwargs["input"])
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools_getter=get_tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
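As an illustrative check (not part of the original notebook), formatting the prompt with an empty scratchpad renders the dynamically retrieved tools into the {tools} and {tool_names} slots:
print(prompt.format(input="what shirts can i buy?", intermediate_steps=[]))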
Output Parser#
The output parser is unchanged from the previous notebook, since we are not changing anything about the output format.
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
output_parser = CustomOutputParser()
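As a quick, illustrative sanity check (the tool name is just an example taken from the lists above), the parser turns a well-formed completion into an AgentAction:
sample_output = """Thought: I should search for shirts
Action: Open_AI_Klarna_product_Api.productsUsingGET
Action Input: shirts"""
action = output_parser.parse(sample_output)
print(action.tool, action.tool_input)  # Open_AI_Klarna_product_Api.productsUsingGET shirts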
Set up LLM, stop sequence, and the agent#
Also the same as in the previous notebook.
llm = OpenAI(temperature=0)
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
Use the Agent#
Now we can use it!
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.run("what shirts can i buy?")
> Entering new AgentExecutor chain...
Thought: I need to find a product API
Action: Open_AI_Klarna_product_Api.productsUsingGET
Action Input: shirts
Observation:I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns. I now know what shirts I can buy
Final Answer: Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.
> Finished chain.
'Arg, I found 10 shirts from the API response. They range in price from $9.99 to $450.00 and come in a variety of materials, colors, and patterns.'
Multi-modal outputs: Image & Text
Contents
Multi-modal outputs: Image & Text
Dall-E
StableDiffusion
Multi-modal outputs: Image & Text#
This notebook shows how non-text producing tools can be used to create multi-modal agents.
This example is limited to text and image outputs and uses UUIDs to transfer content across tools and agents.
This example uses Steamship to generate and store images. Generated images are auth-protected by default.
You can get your Steamship API key here: https://steamship.com/account/api
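The Steamship client needs the key at runtime; one common pattern (an assumption here — consult the Steamship docs for your client version) is to export it as an environment variable before constructing the client:
import os
os.environ["STEAMSHIP_API_KEY"] = "<your-steamship-api-key>"  # hypothetical placeholder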
from steamship import Block, Steamship
import re
from IPython.display import Image
from langchain import OpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.tools import SteamshipImageGenerationTool
llm = OpenAI(temperature=0)
Dall-E#
tools = [
SteamshipImageGenerationTool(model_name= "dall-e")
]
mrkl = initialize_agent(tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
output = mrkl.run("How would you visualize a parrot playing soccer?")
> Entering new AgentExecutor chain...
I need to generate an image of a parrot playing soccer.
Action: GenerateImage
Action Input: A parrot wearing a soccer uniform, kicking a soccer ball.
Observation: E28BE7C7-D105-41E0-8A5B-2CE21424DFEC
Thought: I now have the UUID of the generated image.
Final Answer: The UUID of the generated image is E28BE7C7-D105-41E0-8A5B-2CE21424DFEC.
> Finished chain.
def show_output(output):
"""Display the multi-modal output from the agent."""
UUID_PATTERN = re.compile(
r"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})"
)
outputs = UUID_PATTERN.split(output)
outputs = [re.sub(r"^\W+", "", el) for el in outputs] # Clean trailing and leading non-word characters
for output in outputs:
maybe_block_id = UUID_PATTERN.search(output)
if maybe_block_id:
display(Image(Block.get(Steamship(), _id=maybe_block_id.group()).raw()))
else:
print(output, end="\n\n")
show_output(output)
The UUID of the generated image is
StableDiffusion#
tools = [
SteamshipImageGenerationTool(model_name= "stable-diffusion")
]
mrkl = initialize_agent(tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
output = mrkl.run("How would you visualize a parrot playing soccer?")
> Entering new AgentExecutor chain...
I need to generate an image of a parrot playing soccer.
Action: GenerateImage
Action Input: A parrot wearing a soccer uniform, kicking a soccer ball.
Observation: 25BB588F-85E4-4915-82BE-67ADCF974881
Thought: I now have the UUID of the generated image.
Final Answer: The UUID of the generated image is 25BB588F-85E4-4915-82BE-67ADCF974881.
> Finished chain.
show_output(output)
The UUID of the generated image is
Wikibase Agent
Contents
Wikibase Agent
Preliminaries
API keys and other secrets
OpenAI API Key
Wikidata user-agent header
Enable tracing if desired
Tools
Item and Property lookup
Sparql runner
Agent
Wrap the tools
Prompts
Output parser
Specify the LLM model
Agent and agent executor
Run it!
Wikibase Agent#
This notebook demonstrates a very simple wikibase agent that uses sparql generation. Although this code is intended to work against any wikibase instance, we use http://wikidata.org for testing.
If you are interested in wikibases and sparql, please consider helping to improve this agent. Look here for more details and open questions.
Preliminaries#
API keys and other secrets#
We use an .ini file, like this:
[OPENAI]
OPENAI_API_KEY=xyzzy
[WIKIDATA]
WIKIDATA_USER_AGENT_HEADER=argle-bargle
import configparser
config = configparser.ConfigParser()
config.read('./secrets.ini')
['./secrets.ini']
OpenAI API Key#
An OpenAI API key is required unless you modify the code below to use another LLM provider.
openai_api_key = config['OPENAI']['OPENAI_API_KEY']
import os
os.environ.update({'OPENAI_API_KEY': openai_api_key})
Wikidata user-agent header#
Wikidata policy requires a user-agent header. See https://meta.wikimedia.org/wiki/User-Agent_policy. However, at present this policy is not strictly enforced.
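As an illustrative sketch, a policy-compliant header value names the tool and gives contact details (the values below are hypothetical placeholders):
# wikidata_user_agent_header = "MyWikibaseAgent/0.1 (https://example.org/agent; [email protected])"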
wikidata_user_agent_header = None if not config.has_section('WIKIDATA') else config['WIKIDATA']['WIKIDATA_USER_AGENT_HEADER']
Enable tracing if desired#
#import os
#os.environ["LANGCHAIN_HANDLER"] = "langchain"
#os.environ["LANGCHAIN_SESSION"] = "default" # Make sure this session actually exists.
Tools#
Three tools are provided for this simple agent:
ItemLookup: for finding the q-number of an item
PropertyLookup: for finding the p-number of a property
SparqlQueryRunner: for running a sparql query
Item and Property lookup#
Item and Property lookup are implemented in a single method, using an elastic search endpoint. Not all wikibase instances have it, but wikidata does, and that's where we'll start.
def get_nested_value(o: dict, path: list) -> any:
current = o
for key in path:
try:
current = current[key]
except:
return None
return current
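For example (illustrative values), the helper returns the value at a nested path, or None if any step along the path is missing:
print(get_nested_value({"a": {"b": [10, 20]}}, ["a", "b", 1]))  # 20
print(get_nested_value({"a": {}}, ["a", "missing"]))  # None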
import requests
from typing import Optional
def vocab_lookup(search: str, entity_type: str = "item",
url: str = "https://www.wikidata.org/w/api.php",
user_agent_header: str = wikidata_user_agent_header,
srqiprofile: str = None,
) -> Optional[str]:
headers = {
'Accept': 'application/json'
}
if wikidata_user_agent_header is not None:
headers['User-Agent'] = wikidata_user_agent_header
if entity_type == "item":
srnamespace = 0
srqiprofile = "classic_noboostlinks" if srqiprofile is None else srqiprofile
elif entity_type == "property":
srnamespace = 120
srqiprofile = "classic" if srqiprofile is None else srqiprofile
else:
raise ValueError("entity_type must be either 'property' or 'item'")
params = {
"action": "query",
"list": "search",
"srsearch": search,
"srnamespace": srnamespace,
"srlimit": 1,
"srqiprofile": srqiprofile,
"srwhat": 'text',
"format": "json"
}
response = requests.get(url, headers=headers, params=params)
if response.status_code == 200:
title = get_nested_value(response.json(), ['query', 'search', 0, 'title'])
if title is None:
return f"I couldn't find any {entity_type} for '{search}'. Please rephrase your request and try again"
# if there is a prefix, strip it off
return title.split(':')[-1]
else:
return "Sorry, I got an error. Please try again."
print(vocab_lookup("Malin 1"))
Q4180017
print(vocab_lookup("instance of", entity_type="property"))
P31
print(vocab_lookup("Ceci n'est pas un q-item"))
I couldn't find any item for 'Ceci n'est pas un q-item'. Please rephrase your request and try again
Sparql runner#
This tool runs sparql - by default, wikidata is used.
import requests
from typing import List, Dict, Any
import json
def run_sparql(query: str, url='https://query.wikidata.org/sparql',
user_agent_header: str = wikidata_user_agent_header) -> List[Dict[str, Any]]:
headers = {
'Accept': 'application/json'
}
if wikidata_user_agent_header is not None:
headers['User-Agent'] = wikidata_user_agent_header
response = requests.get(url, headers=headers, params={'query': query, 'format': 'json'})
if response.status_code != 200:
return "That query failed. Perhaps you could try a different one?"
results = get_nested_value(response.json(),['results', 'bindings'])
return json.dumps(results)
run_sparql("SELECT (COUNT(?children) as ?count) WHERE { wd:Q1339 wdt:P40 ?children . }")
'[{"count": {"datatype": "http://www.w3.org/2001/XMLSchema#integer", "type": "literal", "value": "20"}}]'
Agent#
Wrap the tools#
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, LLMChain
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
import re
# Define which tools the agent can use to answer user queries
tools = [
Tool(
name = "ItemLookup",
func=(lambda x: vocab_lookup(x, entity_type="item")),
description="useful for when you need to know the q-number for an item"
),
Tool(
name = "PropertyLookup",
func=(lambda x: vocab_lookup(x, entity_type="property")),
description="useful for when you need to know the p-number for a property"
),
Tool(
name = "SparqlQueryRunner",
func=run_sparql,
description="useful for getting results from a wikibase"
)
]
Prompts#
# Set up the base template
template = """
Answer the following questions by running a sparql query against a wikibase where the p and q items are
completely unknown to you. You will need to discover the p and q items before you can generate the sparql.
Do not assume you know the p and q items for any concepts. Always use tools to find all p and q items.
After you generate the sparql, you should run it. The results will be returned in json.
Summarize the json results in natural language.
You may assume the following prefixes:
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX p: <http://www.wikidata.org/prop/>
PREFIX ps: <http://www.wikidata.org/prop/statement/>
When generating sparql:
* Try to avoid "count" and "filter" queries if possible
* Never enclose the sparql in back-quotes
You have access to the following tools:
{tools}
Use the following format:
Question: the input question for which you must provide a natural language answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Question: {input}
{agent_scratchpad}"""
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools=tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
Output parser#
This is unchanged from langchain docs
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
output_parser = CustomOutputParser()
Specify the LLM model#
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
Agent and agent executor#
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
Run it!#
# If you prefer in-line tracing, uncomment this line
# agent_executor.agent.llm_chain.verbose = True
agent_executor.run("How many children did J.S. Bach have?")
> Entering new AgentExecutor chain...
Thought: I need to find the Q number for J.S. Bach.
Action: ItemLookup
Action Input: J.S. Bach
Observation:Q1339I need to find the P number for children.
Action: PropertyLookup
Action Input: children
Observation:P1971Now I can query the number of children J.S. Bach had.
Action: SparqlQueryRunner
Action Input: SELECT ?children WHERE { wd:Q1339 wdt:P1971 ?children }
Observation:[{"children": {"datatype": "http://www.w3.org/2001/XMLSchema#decimal", "type": "literal", "value": "20"}}]I now know the final answer.
Final Answer: J.S. Bach had 20 children.
> Finished chain.
'J.S. Bach had 20 children.'
agent_executor.run("What is the Basketball-Reference.com NBA player ID of Hakeem Olajuwon?")
> Entering new AgentExecutor chain...
Thought: To find Hakeem Olajuwon's Basketball-Reference.com NBA player ID, I need to first find his Wikidata item (Q-number) and then query for the relevant property (P-number).
Action: ItemLookup
Action Input: Hakeem Olajuwon
Observation:Q273256Now that I have Hakeem Olajuwon's Wikidata item (Q273256), I need to find the P-number for the Basketball-Reference.com NBA player ID property.
Action: PropertyLookup
Action Input: Basketball-Reference.com NBA player ID
Observation:P2685Now that I have both the Q-number for Hakeem Olajuwon (Q273256) and the P-number for the Basketball-Reference.com NBA player ID property (P2685), I can run a SPARQL query to get the ID value.
Action: SparqlQueryRunner
Action Input:
SELECT ?playerID WHERE {
wd:Q273256 wdt:P2685 ?playerID .
}
Observation:[{"playerID": {"type": "literal", "value": "o/olajuha01"}}]I now know the final answer
Final Answer: Hakeem Olajuwon's Basketball-Reference.com NBA player ID is "o/olajuha01".
> Finished chain.
'Hakeem Olajuwon\'s Basketball-Reference.com NBA player ID is "o/olajuha01".'
Plug-and-Plai
Contents
Set up environment
Setup LLM
Set up plugins
Tool Retriever
Prompt Template
Output Parser
Set up LLM, stop sequence, and the agent
Use the Agent
Plug-and-Plai#
This notebook builds upon the idea of tool retrieval, but pulls all tools from plugnplai - a directory of AI Plugins.
Set up environment#
Do necessary imports, etc.
Install the plugnplai lib to get a list of active plugins from the https://plugplai.com directory
pip install plugnplai -q
[notice] A new release of pip available: 22.3.1 -> 23.1.1
[notice] To update, run: pip install --upgrade pip
Note: you may need to restart the kernel to use updated packages.
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, SerpAPIWrapper, LLMChain
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
from langchain.agents.agent_toolkits import NLAToolkit
from langchain.tools.plugin import AIPlugin
import re
import plugnplai
Setup LLM#
llm = OpenAI(temperature=0)
Set up plugins#
Load and index plugins
# Get all plugins from plugnplai.com
urls = plugnplai.get_plugins()
# Get ChatGPT plugins - only ChatGPT verified plugins
urls = plugnplai.get_plugins(filter = 'ChatGPT')
# Get working plugins - only tested plugins (in progress)
urls = plugnplai.get_plugins(filter = 'working')
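# Note: each call above overwrites `urls`, so only the last filter ('working') is used below.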
AI_PLUGINS = [AIPlugin.from_url(url + "/.well-known/ai-plugin.json") for url in urls]
Tool Retriever#
We will use a vectorstore to create embeddings for each tool description. Then, for an incoming query we can create embeddings for that query and do a similarity search for relevant tools.
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
embeddings = OpenAIEmbeddings()
docs = [
Document(page_content=plugin.description_for_model,
metadata={"plugin_name": plugin.name_for_model}
)
for plugin in AI_PLUGINS
]
vector_store = FAISS.from_documents(docs, embeddings)
toolkits_dict = {plugin.name_for_model:
NLAToolkit.from_llm_and_ai_plugin(llm, plugin)
for plugin in AI_PLUGINS}
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.2 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load an OpenAPI 3.0.1 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
Attempting to load a Swagger 2.0 spec. This may result in degraded performance. Convert your OpenAPI spec to 3.1.* spec for better support.
retriever = vector_store.as_retriever()
def get_tools(query):
# Get documents, which contain the Plugins to use
docs = retriever.get_relevant_documents(query)
# Get the toolkits, one for each plugin
tool_kits = [toolkits_dict[d.metadata["plugin_name"]] for d in docs]
# Get the tools: a separate NLAChain for each endpoint
tools = []
for tk in tool_kits:
tools.extend(tk.nla_tools)
return tools
We can now test this retriever to see if it seems to work.
tools = get_tools("What could I do today with my kiddo")
[t.name for t in tools]
['Milo.askMilo',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',
'SchoolDigger_API_V2.0.Autocomplete_GetSchools',
'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',
'SchoolDigger_API_V2.0.Districts_GetDistrict2',
'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',
'SchoolDigger_API_V2.0.Rankings_GetRank_District',
'SchoolDigger_API_V2.0.Schools_GetAllSchools20',
'SchoolDigger_API_V2.0.Schools_GetSchool20',
'Speak.translate',
'Speak.explainPhrase',
'Speak.explainTask']
tools = get_tools("what shirts can i buy?")
[t.name for t in tools]
['Open_AI_Klarna_product_Api.productsUsingGET',
'Milo.askMilo',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.search_all_actions',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.preview_a_zap',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.get_configuration_link',
'Zapier_Natural_Language_Actions_(NLA)_API_(Dynamic)_-_Beta.list_exposed_actions',
'SchoolDigger_API_V2.0.Autocomplete_GetSchools',
'SchoolDigger_API_V2.0.Districts_GetAllDistricts2',
'SchoolDigger_API_V2.0.Districts_GetDistrict2',
'SchoolDigger_API_V2.0.Rankings_GetSchoolRank2',
'SchoolDigger_API_V2.0.Rankings_GetRank_District',
'SchoolDigger_API_V2.0.Schools_GetAllSchools20',
'SchoolDigger_API_V2.0.Schools_GetSchool20']
Prompt Template#
The prompt template is pretty standard, because we're not actually changing much logic in the prompt template itself; rather, we are just changing how retrieval is done.
# Set up the base template
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
The custom prompt template now has the concept of a tools_getter, which we call on the input to select the tools to use
from typing import Callable
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
############## NEW ######################
# The list of tools available
tools_getter: Callable
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
############## NEW ######################
tools = self.tools_getter(kwargs["input"])
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools_getter=get_tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
Output Parser#
The output parser is unchanged from the previous notebook, since we are not changing anything about the output format.
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish