method_name (string, 1–78 chars) | method_body (string, 3–9.66k chars) | full_code (string, 31–10.7k chars) | docstring (string, 4–4.74k chars, ⌀ = null) |
---|---|---|---|
test_import_storage | """Attempt to import storage modules."""
from langchain_community.storage.redis import RedisStore | def test_import_storage() ->None:
"""Attempt to import storage modules."""
from langchain_community.storage.redis import RedisStore | Attempt to import storage modules. |
test_memory_with_message_store | """Test the memory with a message store."""
message_history = MongoDBChatMessageHistory(connection_string=
connection_string, session_id='test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear()
assert memory.chat_memory.messages == [] | def test_memory_with_message_store() ->None:
"""Test the memory with a message store."""
message_history = MongoDBChatMessageHistory(connection_string=
connection_string, session_id='test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear()
assert memory.chat_memory.messages == [] | Test the memory with a message store. |
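A hedged sketch of the memory wiring exercised by the test above. It assumes a reachable MongoDB instance plus `langchain`, `langchain-community`, and `pymongo` installed; the connection string and session id are illustrative, not taken from the test.

```python
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import MongoDBChatMessageHistory

history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017",  # illustrative connection string
    session_id="demo-session",
)
memory = ConversationBufferMemory(chat_memory=history, return_messages=True)
memory.chat_memory.add_user_message("hi")
memory.chat_memory.add_ai_message("hello!")
print(memory.chat_memory.messages)  # persisted in MongoDB, not just in-process
memory.chat_memory.clear()
```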
embed_query | return self._query([text])[0] | def embed_query(self, text: str) ->List[float]:
return self._query([text])[0] | null |
on_text | """
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({'action': 'on_text', 'text': text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp) | def on_text(self, text: str, **kwargs: Any) ->None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({'action': 'on_text', 'text': text})
resp.update(self.get_custom_callback_meta())
self.on_text_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp) | Run when agent is ending. |
from_examples | """Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [' '.join(sorted_values({k: eg[k] for k in input_keys
})) for eg in examples]
else:
string_examples = [' '.join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(string_examples, embeddings,
metadatas=examples, **vectorstore_cls_kwargs)
return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) | @classmethod
def from_examples(cls, examples: List[dict], embeddings: Embeddings,
vectorstore_cls: Type[VectorStore], k: int=4, input_keys: Optional[List
[str]]=None, **vectorstore_cls_kwargs: Any
) ->SemanticSimilarityExampleSelector:
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
if input_keys:
string_examples = [' '.join(sorted_values({k: eg[k] for k in
input_keys})) for eg in examples]
else:
string_examples = [' '.join(sorted_values(eg)) for eg in examples]
vectorstore = vectorstore_cls.from_texts(string_examples, embeddings,
metadatas=examples, **vectorstore_cls_kwargs)
return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) | Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
Args:
examples: List of examples to use in the prompt.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
k: Number of examples to select
input_keys: If provided, the search is based on the input variables
instead of all variables.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store. |
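A hedged usage sketch for `from_examples` above, assuming `faiss-cpu`, `langchain`, `langchain-community`, and an `OPENAI_API_KEY` are available; the example pairs and query are illustrative.

```python
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "energetic", "output": "lethargic"},
]
selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    OpenAIEmbeddings(),
    FAISS,
    k=1,                    # number of examples to select
    input_keys=["input"],   # search on the input variable only
)
# Returns the example most semantically similar to the query variables.
print(selector.select_examples({"input": "cheerful"}))
```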
stream | return self.transform(iter([input]), config, **kwargs) | def stream(self, input: Dict[str, Any], config: Optional[RunnableConfig]=
None, **kwargs: Any) ->Iterator[Dict[str, Any]]:
return self.transform(iter([input]), config, **kwargs) | null |
_load_agent_from_file | """Load agent from file."""
valid_suffixes = {'json', 'yaml'}
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix[1:] == 'json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == 'yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise ValueError(f'Unsupported file type, must be one of {valid_suffixes}.'
)
return load_agent_from_config(config, **kwargs) | def _load_agent_from_file(file: Union[str, Path], **kwargs: Any) ->Union[
BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {'json', 'yaml'}
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix[1:] == 'json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == 'yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise ValueError(
f'Unsupported file type, must be one of {valid_suffixes}.')
return load_agent_from_config(config, **kwargs) | Load agent from file. |
save_context | """Save context from this session for every memory."""
for memory in self.memories:
memory.save_context(inputs, outputs) | def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
"""Save context from this session for every memory."""
for memory in self.memories:
memory.save_context(inputs, outputs) | Save context from this session for every memory. |
get_number_of_documents | """Helper to see the number of documents in the index
Returns:
int: The number of documents
"""
return self._client.index(self._index_name).get_stats()['numberOfDocuments'] | def get_number_of_documents(self) ->int:
"""Helper to see the number of documents in the index
Returns:
int: The number of documents
"""
return self._client.index(self._index_name).get_stats()['numberOfDocuments'
] | Helper to see the number of documents in the index
Returns:
int: The number of documents |
from_texts | vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
vs.add_texts(texts, metadatas=metadatas, ids=ids)
return vs | @classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, ids: Optional[List[str]]=None, persist_path:
Optional[str]=None, **kwargs: Any) ->'SKLearnVectorStore':
vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
vs.add_texts(texts, metadatas=metadatas, ids=ids)
return vs | null |
test_arcee_api_key_is_secret_string | mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {'model_id': '', 'status':
'training_complete'}
arcee_without_env_var = Arcee(model='DALM-PubMed', arcee_api_key=
'secret_api_key', arcee_api_url='https://localhost', arcee_api_version=
'version')
assert isinstance(arcee_without_env_var.arcee_api_key, SecretStr) | @patch('langchain_community.utilities.arcee.requests.get')
def test_arcee_api_key_is_secret_string(mock_get: MagicMock) ->None:
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {'model_id': '', 'status':
'training_complete'}
arcee_without_env_var = Arcee(model='DALM-PubMed', arcee_api_key=
'secret_api_key', arcee_api_url='https://localhost',
arcee_api_version='version')
assert isinstance(arcee_without_env_var.arcee_api_key, SecretStr) | null |
_import_requests_tool_RequestsDeleteTool | from langchain_community.tools.requests.tool import RequestsDeleteTool
return RequestsDeleteTool | def _import_requests_tool_RequestsDeleteTool() ->Any:
from langchain_community.tools.requests.tool import RequestsDeleteTool
return RequestsDeleteTool | null |
_call | from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
soup = BeautifulSoup(res, 'html.parser')
other_keys[self.requests_key] = soup.get_text()[:self.text_length]
result = self.llm_chain.predict(callbacks=_run_manager.get_child(), **
other_keys)
return {self.output_key: result} | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
soup = BeautifulSoup(res, 'html.parser')
other_keys[self.requests_key] = soup.get_text()[:self.text_length]
result = self.llm_chain.predict(callbacks=_run_manager.get_child(), **
other_keys)
return {self.output_key: result} | null |
_stream | """Allows streaming to model!"""
inputs = self.custom_preprocess(messages)
for response in self.get_stream(inputs=inputs, stop=stop, labels=labels, **
kwargs):
chunk = self._get_filled_chunk(self.custom_postprocess(response))
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | def _stream(self, messages: List[BaseMessage], stop: Optional[Sequence[str]
]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, labels:
Optional[dict]=None, **kwargs: Any) ->Iterator[ChatGenerationChunk]:
"""Allows streaming to model!"""
inputs = self.custom_preprocess(messages)
for response in self.get_stream(inputs=inputs, stop=stop, labels=labels,
**kwargs):
chunk = self._get_filled_chunk(self.custom_postprocess(response))
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk) | Allows streaming to model! |
_chain_type | return 'graph_aql_chain' | @property
def _chain_type(self) ->str:
return 'graph_aql_chain' | null |
embeddings | return self.embedding | @property
def embeddings(self) ->Embeddings:
return self.embedding | null |
_import_aleph_alpha | from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha | def _import_aleph_alpha() ->Any:
from langchain_community.llms.aleph_alpha import AlephAlpha
return AlephAlpha | null |
__call__ | """Maps the Run and Example to a dictionary."""
if not example.outputs:
raise ValueError(
f'Example {example.id} has no outputs to use as a reference label.')
return self.map(example) | def __call__(self, example: Example) ->Dict[str, str]:
"""Maps the Run and Example to a dictionary."""
if not example.outputs:
raise ValueError(
f'Example {example.id} has no outputs to use as a reference label.')
return self.map(example) | Maps the Run and Example to a dictionary. |
_run | from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data['url'], data['data'])
response = response[:self.response_length]
return self.llm_chain.predict(response=response, instructions=data[
'output_instructions']).strip() | def _run(self, text: str) ->str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.post(data['url'], data['data'])
response = response[:self.response_length]
return self.llm_chain.predict(response=response, instructions=data[
'output_instructions']).strip() | null |
parse | """Parse the request and error tags."""
json_match = re.search('```json(.*?)```', llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
message_match = re.search('```text(.*?)```', llm_output, re.DOTALL)
if message_match:
return f'MESSAGE: {message_match.group(1).strip()}'
return 'ERROR making request' | def parse(self, llm_output: str) ->str:
"""Parse the request and error tags."""
json_match = re.search('```json(.*?)```', llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
message_match = re.search('```text(.*?)```', llm_output, re.DOTALL)
if message_match:
return f'MESSAGE: {message_match.group(1).strip()}'
return 'ERROR making request' | Parse the request and error tags. |
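A small, self-contained sketch of the fenced-block matching used in `parse` above. The real method feeds a matched JSON block into `self._load_json_block`; this sketch simply returns the raw block. The triple backticks are built at runtime so the example itself stays readable.

```python
import re

FENCE = "`" * 3  # avoids writing literal code fences inside this example

def parse_response(llm_output: str) -> str:
    json_match = re.search(FENCE + r"json(.*?)" + FENCE, llm_output, re.DOTALL)
    if json_match:
        return json_match.group(1).strip()  # original code would parse this JSON block
    message_match = re.search(FENCE + r"text(.*?)" + FENCE, llm_output, re.DOTALL)
    if message_match:
        return f"MESSAGE: {message_match.group(1).strip()}"
    return "ERROR making request"

sample = "Thought...\n" + FENCE + "text\nNothing to call.\n" + FENCE
assert parse_response(sample) == "MESSAGE: Nothing to call."
```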
_import_spark_sql_tool_QueryCheckerTool | from langchain_community.tools.spark_sql.tool import QueryCheckerTool
return QueryCheckerTool | def _import_spark_sql_tool_QueryCheckerTool() ->Any:
from langchain_community.tools.spark_sql.tool import QueryCheckerTool
return QueryCheckerTool | null |
max_marginal_relevance_search | """Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
embedding: List[float] = []
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
if embedding.__len__() == 0:
return []
results = self.max_marginal_relevance_search_by_vector(embedding, k,
fetch_k, lambda_mult=lambda_mult, text_in_page_content=
text_in_page_content, meta_filter=meta_filter)
return results | def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, text_in_page_content: Optional[str]=None,
meta_filter: Optional[dict]=None, **kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
embedding: List[float] = []
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(
query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
if embedding.__len__() == 0:
return []
results = self.max_marginal_relevance_search_by_vector(embedding, k,
fetch_k, lambda_mult=lambda_mult, text_in_page_content=
text_in_page_content, meta_filter=meta_filter)
return results | Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter (Optional[dict]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance. |
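The docstring above describes maximal marginal relevance, but the `lambda_mult` trade-off is easiest to see in isolation. Below is a minimal, store-agnostic sketch of the greedy MMR rule assuming similarities are precomputed; it is not AwaDB-specific code.

```python
from typing import List

def mmr_select(query_sim: List[float], doc_sims: List[List[float]],
               k: int = 4, lambda_mult: float = 0.5) -> List[int]:
    """Greedy MMR: score(i) = lambda_mult * sim(i, query)
    - (1 - lambda_mult) * max_{j in selected} sim(i, j)."""
    selected: List[int] = []
    candidates = list(range(len(query_sim)))
    while candidates and len(selected) < k:
        def score(i: int) -> float:
            redundancy = max((doc_sims[i][j] for j in selected), default=0.0)
            return lambda_mult * query_sim[i] - (1 - lambda_mult) * redundancy
        best = max(candidates, key=score)
        selected.append(best)
        candidates.remove(best)
    return selected

# Docs 0 and 1 are near-duplicates; lambda_mult=0.5 keeps one and adds diverse doc 2.
sims_to_query = [0.9, 0.88, 0.5]
pairwise = [[1.0, 0.95, 0.1], [0.95, 1.0, 0.1], [0.1, 0.1, 1.0]]
print(mmr_select(sims_to_query, pairwise, k=2))  # [0, 2]
```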
_is_b64 | return s.startswith('data:image') | def _is_b64(s: str) ->bool:
return s.startswith('data:image') | null |
test_openai_model_param | llm = ChatOpenAI(model='foo')
assert llm.model_name == 'foo'
llm = ChatOpenAI(model_name='foo')
assert llm.model_name == 'foo' | @pytest.mark.requires('openai')
def test_openai_model_param() ->None:
llm = ChatOpenAI(model='foo')
assert llm.model_name == 'foo'
llm = ChatOpenAI(model_name='foo')
assert llm.model_name == 'foo' | null |
_chunk | for i in range(0, len(texts), size):
yield texts[i:i + size] | def _chunk(texts: List[str], size: int) ->Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i:i + size] | null |
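The `_chunk` helper above is a plain batching generator; a self-contained check of its behaviour:

```python
from typing import Iterator, List

def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
    # Yield successive batches of `size` items; the last batch may be shorter.
    for i in range(0, len(texts), size):
        yield texts[i:i + size]

assert list(_chunk(["a", "b", "c", "d", "e"], 2)) == [["a", "b"], ["c", "d"], ["e"]]
```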
load_tools | """Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(callback_manager=kwargs.get(
'callback_manager'), callbacks=callbacks)
for name in tool_names:
if name == 'requests':
warnings.warn(
'tool name `requests` is deprecated - please use `requests_all` or specify the requests method'
)
if name == 'requests_all':
requests_method_tools = [_tool for _tool in _BASE_TOOLS if _tool.
startswith('requests_')]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f'Tool {name} requires an LLM to be provided')
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f'Tool {name} requires an LLM to be provided')
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f'Tool {name} requires some parameters that were not provided: {missing_keys}'
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f'Got unknown tool {name}')
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools | def load_tools(tool_names: List[str], llm: Optional[BaseLanguageModel]=None,
callbacks: Callbacks=None, **kwargs: Any) ->List[BaseTool]:
"""Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(callback_manager=kwargs.get(
'callback_manager'), callbacks=callbacks)
for name in tool_names:
if name == 'requests':
warnings.warn(
'tool name `requests` is deprecated - please use `requests_all` or specify the requests method'
)
if name == 'requests_all':
requests_method_tools = [_tool for _tool in _BASE_TOOLS if
_tool.startswith('requests_')]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f'Tool {name} requires an LLM to be provided')
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f'Tool {name} requires an LLM to be provided')
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f'Tool {name} requires some parameters that were not provided: {missing_keys}'
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f'Got unknown tool {name}')
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools | Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools. |
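A hedged usage sketch for `load_tools` above; it assumes `langchain` and `langchain-community` are installed and an OpenAI key is set. The tool names shown are common built-ins, but check the `_BASE_TOOLS` / `_LLM_TOOLS` registries in your version for what is actually available.

```python
from langchain.agents import load_tools
from langchain_community.llms import OpenAI

llm = OpenAI(temperature=0)
# 'llm-math' needs an LLM; 'requests_all' expands to the per-method request tools.
tools = load_tools(["llm-math", "requests_all"], llm=llm)
print([tool.name for tool in tools])
```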
__getattr__ | """Get attr name."""
if name == 'create_pandas_dataframe_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist') | def __getattr__(name: str) ->Any:
"""Get attr name."""
if name == 'create_pandas_dataframe_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist') | Get attr name. |
ignore_llm | """Whether to ignore LLM callbacks."""
return self.ignore_llm_ | @property
def ignore_llm(self) ->bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_ | Whether to ignore LLM callbacks. |
_chunk_to_generation | return Generation(text=chunk.text, generation_info=chunk.generation_info) | @staticmethod
def _chunk_to_generation(chunk: GenerationChunk) ->Generation:
return Generation(text=chunk.text, generation_info=chunk.generation_info) | null |
_uuid_key | return uuid.uuid4().hex | def _uuid_key() ->str:
return uuid.uuid4().hex | null |
from_nasa_api_wrapper | operations: List[Dict] = [{'mode': 'search_media', 'name':
'Search NASA Image and Video Library media', 'description':
NASA_SEARCH_PROMPT}, {'mode': 'get_media_metadata_manifest', 'name':
'Get NASA Image and Video Library media metadata manifest',
'description': NASA_MANIFEST_PROMPT}, {'mode':
'get_media_metadata_location', 'name':
'Get NASA Image and Video Library media metadata location',
'description': NASA_METADATA_PROMPT}, {'mode':
'get_video_captions_location', 'name':
'Get NASA Image and Video Library video captions location',
'description': NASA_CAPTIONS_PROMPT}]
tools = [NasaAction(name=action['name'], description=action['description'],
mode=action['mode'], api_wrapper=nasa_api_wrapper) for action in operations
]
return cls(tools=tools) | @classmethod
def from_nasa_api_wrapper(cls, nasa_api_wrapper: NasaAPIWrapper
) ->'NasaToolkit':
operations: List[Dict] = [{'mode': 'search_media', 'name':
'Search NASA Image and Video Library media', 'description':
NASA_SEARCH_PROMPT}, {'mode': 'get_media_metadata_manifest', 'name':
'Get NASA Image and Video Library media metadata manifest',
'description': NASA_MANIFEST_PROMPT}, {'mode':
'get_media_metadata_location', 'name':
'Get NASA Image and Video Library media metadata location',
'description': NASA_METADATA_PROMPT}, {'mode':
'get_video_captions_location', 'name':
'Get NASA Image and Video Library video captions location',
'description': NASA_CAPTIONS_PROMPT}]
tools = [NasaAction(name=action['name'], description=action[
'description'], mode=action['mode'], api_wrapper=nasa_api_wrapper) for
action in operations]
return cls(tools=tools) | null |
embeddings | return self._embedding | @property
def embeddings(self) ->Embeddings:
return self._embedding | null |
similarity_search_by_vector | """Perform a similarity search with MyScale by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [Document(page_content=r[self.config.column_map['text']],
metadata=r[self.config.column_map['metadata']]) for r in self.
client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m')
return [] | def similarity_search_by_vector(self, embedding: List[float], k: int=4,
where_str: Optional[str]=None, **kwargs: Any) ->List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [Document(page_content=r[self.config.column_map['text']],
metadata=r[self.config.column_map['metadata']]) for r in self.
client.query(q_str).named_results()]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
)
return [] | Perform a similarity search with MyScale by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity) |
_validate_uri | if self.target_uri == 'databricks':
return
if urlparse(self.target_uri).scheme != 'databricks':
raise ValueError(
'Invalid target URI. The target URI must be a valid databricks URI.') | def _validate_uri(self) ->None:
if self.target_uri == 'databricks':
return
if urlparse(self.target_uri).scheme != 'databricks':
raise ValueError(
'Invalid target URI. The target URI must be a valid databricks URI.'
) | null |
test_del | mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(self.space, self.username, self.password, self.
address, self.port, self.session_pool_size)
nebula_graph.__del__()
mock_session_pool.return_value.close.assert_called_once() | @patch('nebula3.gclient.net.SessionPool.SessionPool')
def test_del(self, mock_session_pool: Any) ->None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(self.space, self.username, self.password,
self.address, self.port, self.session_pool_size)
nebula_graph.__del__()
mock_session_pool.return_value.close.assert_called_once() | null |
consistent_fake_sparse_encoder | """
Generates a consistent fake sparse vector.
Parameters:
- query (str): The query string to make the function deterministic.
- size (int): The size of the vector to generate.
- density (float): The density of the vector to generate.
Returns:
- indices (list): List of indices where the non-zero elements are located.
- values (list): List of corresponding float values at the non-zero indices.
"""
density = max(0.0, min(1.0, density))
seed = hash(query)
random.seed(seed)
num_non_zero_elements = int(size * density)
indices = sorted(random.sample(range(size), num_non_zero_elements))
values = [random.uniform(0.0, 1.0) for _ in range(num_non_zero_elements)]
return indices, values | def consistent_fake_sparse_encoder(query: str, size: int=100, density:
float=0.7) ->Tuple[List[int], List[float]]:
"""
Generates a consistent fake sparse vector.
Parameters:
- query (str): The query string to make the function deterministic.
- size (int): The size of the vector to generate.
- density (float): The density of the vector to generate.
Returns:
- indices (list): List of indices where the non-zero elements are located.
- values (list): List of corresponding float values at the non-zero indices.
"""
density = max(0.0, min(1.0, density))
seed = hash(query)
random.seed(seed)
num_non_zero_elements = int(size * density)
indices = sorted(random.sample(range(size), num_non_zero_elements))
values = [random.uniform(0.0, 1.0) for _ in range(num_non_zero_elements)]
return indices, values | Generates a consistent fake sparse vector.
Parameters:
- query (str): The query string to make the function deterministic.
- size (int): The size of the vector to generate.
- density (float): The density of the vector to generate.
Returns:
- indices (list): List of indices where the non-zero elements are located.
- values (list): List of corresponding float values at the non-zero indices. |
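A self-contained check of `consistent_fake_sparse_encoder` above. Note that its "consistency" relies on `hash(query)`, and string hashes are salted per Python process, so results are only deterministic within one process unless `PYTHONHASHSEED` is fixed.

```python
import random
from typing import List, Tuple

def consistent_fake_sparse_encoder(query: str, size: int = 100,
                                   density: float = 0.7) -> Tuple[List[int], List[float]]:
    density = max(0.0, min(1.0, density))
    random.seed(hash(query))  # string hashes are salted per process (see note above)
    num_non_zero = int(size * density)
    indices = sorted(random.sample(range(size), num_non_zero))
    values = [random.uniform(0.0, 1.0) for _ in range(num_non_zero)]
    return indices, values

a = consistent_fake_sparse_encoder("hello", size=10, density=0.5)
b = consistent_fake_sparse_encoder("hello", size=10, density=0.5)
assert a == b            # same query -> same sparse vector (within this process)
assert len(a[0]) == 5    # int(10 * 0.5) non-zero entries
```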
_generate | params, chat, message = self._prepare_chat(messages, stop=stop)
response: genai.types.GenerateContentResponse = _chat_with_retry(content=
message, **params, generation_method=chat.send_message)
return _response_to_result(response) | def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
params, chat, message = self._prepare_chat(messages, stop=stop)
response: genai.types.GenerateContentResponse = _chat_with_retry(content
=message, **params, generation_method=chat.send_message)
return _response_to_result(response) | null |
parse | raise NotImplementedError(
'This OutputParser can only be called by the `parse_with_prompt` method.') | def parse(self, completion: str) ->T:
raise NotImplementedError(
'This OutputParser can only be called by the `parse_with_prompt` method.'
) | null |
get_name | """Get the name of the runnable."""
name = name or self.name or self.__class__.__name__
if suffix:
if name[0].isupper():
return name + suffix.title()
else:
return name + '_' + suffix.lower()
else:
return name | def get_name(self, suffix: Optional[str]=None, *, name: Optional[str]=None
) ->str:
"""Get the name of the runnable."""
name = name or self.name or self.__class__.__name__
if suffix:
if name[0].isupper():
return name + suffix.title()
else:
return name + '_' + suffix.lower()
else:
return name | Get the name of the runnable. |
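A standalone re-implementation of the suffix rule in `get_name` above, to make the casing behaviour concrete; the function and names here are illustrative, not the Runnable API.

```python
from typing import Optional

def runnable_name(base: str, suffix: Optional[str] = None) -> str:
    if not suffix:
        return base
    # Class-style names get a TitleCased suffix; snake_case names get '_suffix'.
    return base + suffix.title() if base[0].isupper() else base + "_" + suffix.lower()

assert runnable_name("RunnableLambda", "input") == "RunnableLambdaInput"
assert runnable_name("my_runnable", "input") == "my_runnable_input"
```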
delete_all_indexes | client = self.client()
indexes = client.get_indexes()
for index in indexes['results']:
task = client.index(index.uid).delete()
client.wait_for_task(task.task_uid) | def delete_all_indexes(self) ->None:
client = self.client()
indexes = client.get_indexes()
for index in indexes['results']:
task = client.index(index.uid).delete()
client.wait_for_task(task.task_uid) | null |
_scopes | """Return required scopes.""" | @property
@abstractmethod
def _scopes(self) ->List[str]:
"""Return required scopes.""" | Return required scopes. |
embed_query | """Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
resp = self._embedding({'input': [text]})
if resp.get('error_code'):
if resp.get('error_code') == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({'input': [text]})
else:
raise ValueError(f'Error from Ernie: {resp}')
return resp['data'][0]['embedding'] | def embed_query(self, text: str) ->List[float]:
"""Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
resp = self._embedding({'input': [text]})
if resp.get('error_code'):
if resp.get('error_code') == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({'input': [text]})
else:
raise ValueError(f'Error from Ernie: {resp}')
return resp['data'][0]['embedding'] | Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text. |
embed_documents | """Embed a list of documents using AwaEmbedding.
Args:
texts: The list of texts need to be embedded
Returns:
List of embeddings, one for each text.
"""
return self.client.EmbeddingBatch(texts) | def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Embed a list of documents using AwaEmbedding.
Args:
texts: The list of texts need to be embedded
Returns:
List of embeddings, one for each text.
"""
return self.client.EmbeddingBatch(texts) | Embed a list of documents using AwaEmbedding.
Args:
texts: The list of texts need to be embedded
Returns:
List of embeddings, one for each text. |
_call | if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return 'foo'
else:
return 'bar' | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return 'foo'
else:
return 'bar' | null |
nlp_request | """
Make an API request to the Diffbot NLP endpoint.
Args:
text (str): The text to be processed.
Returns:
Dict[str, Any]: The JSON response from the API.
"""
payload = {'content': text, 'lang': 'en'}
FIELDS = 'facts'
HOST = 'nl.diffbot.com'
url = (
f'https://{HOST}/v1/?fields={FIELDS}&token={self.diffbot_api_key}&language=en'
)
result = requests.post(url, data=payload)
return result.json() | def nlp_request(self, text: str) ->Dict[str, Any]:
"""
Make an API request to the Diffbot NLP endpoint.
Args:
text (str): The text to be processed.
Returns:
Dict[str, Any]: The JSON response from the API.
"""
payload = {'content': text, 'lang': 'en'}
FIELDS = 'facts'
HOST = 'nl.diffbot.com'
url = (
f'https://{HOST}/v1/?fields={FIELDS}&token={self.diffbot_api_key}&language=en'
)
result = requests.post(url, data=payload)
return result.json() | Make an API request to the Diffbot NLP endpoint.
Args:
text (str): The text to be processed.
Returns:
Dict[str, Any]: The JSON response from the API. |
_llm_type | return 'fake-list-chat-model' | @property
def _llm_type(self) ->str:
return 'fake-list-chat-model' | null |
test_mdelete | key_value_pairs = [('key1', b'value1'), ('key2', b'value2')]
file_store.mset(key_value_pairs)
file_store.mdelete(['key1'])
values = file_store.mget(['key1'])
assert values == [None] | def test_mdelete(file_store: LocalFileStore) ->None:
key_value_pairs = [('key1', b'value1'), ('key2', b'value2')]
file_store.mset(key_value_pairs)
file_store.mdelete(['key1'])
values = file_store.mget(['key1'])
assert values == [None] | null |
test_vertex_generate_code | llm = VertexAI(temperature=0.3, n=2, model_name='code-bison@001')
output = llm.generate(['generate a python method that says foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2 | @pytest.mark.scheduled
def test_vertex_generate_code() ->None:
llm = VertexAI(temperature=0.3, n=2, model_name='code-bison@001')
output = llm.generate(['generate a python method that says foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == 2 | null |
_get_score | match = re.search('grade:\\s*(correct|incorrect)', text.strip(), re.IGNORECASE)
if match:
if match.group(1).upper() == 'CORRECT':
return 'CORRECT', 1
elif match.group(1).upper() == 'INCORRECT':
return 'INCORRECT', 0
try:
first_word = text.strip().split()[0].translate(str.maketrans('', '',
string.punctuation))
if first_word.upper() == 'CORRECT':
return 'CORRECT', 1
elif first_word.upper() == 'INCORRECT':
return 'INCORRECT', 0
last_word = text.strip().split()[-1].translate(str.maketrans('', '',
string.punctuation))
if last_word.upper() == 'CORRECT':
return 'CORRECT', 1
elif last_word.upper() == 'INCORRECT':
return 'INCORRECT', 0
except IndexError:
pass
return None | def _get_score(text: str) ->Optional[Tuple[str, int]]:
match = re.search('grade:\\s*(correct|incorrect)', text.strip(), re.
IGNORECASE)
if match:
if match.group(1).upper() == 'CORRECT':
return 'CORRECT', 1
elif match.group(1).upper() == 'INCORRECT':
return 'INCORRECT', 0
try:
first_word = text.strip().split()[0].translate(str.maketrans('', '',
string.punctuation))
if first_word.upper() == 'CORRECT':
return 'CORRECT', 1
elif first_word.upper() == 'INCORRECT':
return 'INCORRECT', 0
last_word = text.strip().split()[-1].translate(str.maketrans('', '',
string.punctuation))
if last_word.upper() == 'CORRECT':
return 'CORRECT', 1
elif last_word.upper() == 'INCORRECT':
return 'INCORRECT', 0
except IndexError:
pass
return None | null |
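A compact, self-contained check of the primary "grade:" regex used by `_get_score` above; the fallback first-word/last-word handling is omitted here.

```python
import re
from typing import Optional, Tuple

def grade(text: str) -> Optional[Tuple[str, int]]:
    match = re.search(r"grade:\s*(correct|incorrect)", text.strip(), re.IGNORECASE)
    if not match:
        return None
    return ("CORRECT", 1) if match.group(1).upper() == "CORRECT" else ("INCORRECT", 0)

assert grade("Reasoning...\nGRADE: Correct") == ("CORRECT", 1)
assert grade("grade: incorrect, the answer is off by one") == ("INCORRECT", 0)
assert grade("no verdict here") is None
```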
test_functions_call | chat = QianfanChatEndpoint(model='ERNIE-Bot')
prompt = ChatPromptTemplate(messages=[HumanMessage(content=
"What's the temperature in Shanghai today?"), AIMessage(content='',
additional_kwargs={'function_call': {'name': 'get_current_temperature',
'thoughts':
'i will use get_current_temperature to resolve the questions',
'arguments': '{"location":"Shanghai","unit":"centigrade"}'}}),
FunctionMessage(name='get_current_weather', content=
'{"temperature": "25", "unit": "摄氏度", "description": "晴朗"}'
)])
chain = prompt | chat.bind(functions=_FUNCTIONS)
resp = chain.invoke({})
assert isinstance(resp, AIMessage) | def test_functions_call() ->None:
chat = QianfanChatEndpoint(model='ERNIE-Bot')
prompt = ChatPromptTemplate(messages=[HumanMessage(content=
"What's the temperature in Shanghai today?"), AIMessage(content='',
additional_kwargs={'function_call': {'name':
'get_current_temperature', 'thoughts':
'i will use get_current_temperature to resolve the questions',
'arguments': '{"location":"Shanghai","unit":"centigrade"}'}}),
FunctionMessage(name='get_current_weather', content=
'{"temperature": "25", "unit": "摄氏度", "description": "晴朗"}'
)])
chain = prompt | chat.bind(functions=_FUNCTIONS)
resp = chain.invoke({})
assert isinstance(resp, AIMessage) | null |
find_replies | """
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
direct_replies = reply_data[reply_data['reply_to_id'] == parent_id][
'message.id'].tolist()
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies | def find_replies(parent_id: int, reply_data: pd.DataFrame) ->List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
direct_replies = reply_data[reply_data['reply_to_id'] == parent_id][
'message.id'].tolist()
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies | Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID. |
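A self-contained check of the recursive reply walk in `find_replies` above; it assumes `pandas` is installed and uses a tiny illustrative frame.

```python
from typing import List
import pandas as pd

def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
    # Direct replies first, then recurse into each reply's own replies.
    direct = reply_data[reply_data["reply_to_id"] == parent_id]["message.id"].tolist()
    out: List[int] = []
    for reply_id in direct:
        out += [reply_id] + find_replies(reply_id, reply_data)
    return out

replies = pd.DataFrame({"message.id": [2, 3, 4], "reply_to_id": [1, 2, 1]})
# 2 and 4 reply to 1 directly; 3 replies to 2, so it is picked up recursively.
assert find_replies(1, replies) == [2, 3, 4]
```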
validate_environment | """Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region',
'AZURE_COGS_REGION')
try:
import azure.cognitiveservices.speech as speechsdk
values['speech_config'] = speechsdk.SpeechConfig(subscription=
azure_cogs_key, region=azure_cogs_region)
except ImportError:
raise ImportError(
'azure-cognitiveservices-speech is not installed. Run `pip install azure-cognitiveservices-speech` to install.'
)
return values | @root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region',
'AZURE_COGS_REGION')
try:
import azure.cognitiveservices.speech as speechsdk
values['speech_config'] = speechsdk.SpeechConfig(subscription=
azure_cogs_key, region=azure_cogs_region)
except ImportError:
raise ImportError(
'azure-cognitiveservices-speech is not installed. Run `pip install azure-cognitiveservices-speech` to install.'
)
return values | Validate that api key and endpoint exists in environment. |
test_func_call | act = json.dumps([{'action_name': 'foo', 'action': {'param': 42}}])
msg = AIMessage(content='LLM thoughts.', additional_kwargs={'function_call':
{'name': 'foo', 'arguments': f'{{"actions": {act}}}'}})
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == 'foo'
assert action.tool_input == {'param': 42}
assert action.log == """
Invoking: `foo` with `{'param': 42}`
responded: LLM thoughts.
"""
assert action.message_log == [msg] | def test_func_call(self) ->None:
act = json.dumps([{'action_name': 'foo', 'action': {'param': 42}}])
msg = AIMessage(content='LLM thoughts.', additional_kwargs={
'function_call': {'name': 'foo', 'arguments': f'{{"actions": {act}}}'}}
)
result = _parse_ai_message(msg)
assert isinstance(result, list)
assert len(result) == 1
action = result[0]
assert isinstance(action, _FunctionsAgentAction)
assert action.tool == 'foo'
assert action.tool_input == {'param': 42}
assert action.log == "\nInvoking: `foo` with `{'param': 42}`\nresponded: LLM thoughts.\n\n"
assert action.message_log == [msg] | null |
_import_google_serper | from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
return GoogleSerperAPIWrapper | def _import_google_serper() ->Any:
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
return GoogleSerperAPIWrapper | null |
test_parse_history | system_input = "You're supposed to answer math questions."
text_question1, text_answer1 = 'How much is 2+2?', '4'
text_question2 = 'How much is 3+3?'
system_message = SystemMessage(content=system_input)
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
messages = [system_message, message1, message2, message3]
history = _parse_chat_history(messages, convert_system_message_to_human=True)
assert len(history) == 3
assert history[0] == {'role': 'user', 'parts': [{'text': system_input}, {
'text': text_question1}]}
assert history[1] == {'role': 'model', 'parts': [{'text': text_answer1}]} | def test_parse_history() ->None:
system_input = "You're supposed to answer math questions."
text_question1, text_answer1 = 'How much is 2+2?', '4'
text_question2 = 'How much is 3+3?'
system_message = SystemMessage(content=system_input)
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
messages = [system_message, message1, message2, message3]
history = _parse_chat_history(messages, convert_system_message_to_human
=True)
assert len(history) == 3
assert history[0] == {'role': 'user', 'parts': [{'text': system_input},
{'text': text_question1}]}
assert history[1] == {'role': 'model', 'parts': [{'text': text_answer1}]} | null |
test_chroma_with_relevance_score_custom_normalization_fn | """Test searching with relevance score and custom normalization function."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas,
relevance_score_fn=lambda d: d * 0, collection_metadata={'hnsw:space':
'l2'})
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), -
0.0), (Document(page_content='bar', metadata={'page': '1'}), -0.0), (
Document(page_content='baz', metadata={'page': '2'}), -0.0)] | def test_chroma_with_relevance_score_custom_normalization_fn() ->None:
"""Test searching with relevance score and custom normalization function."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas,
relevance_score_fn=lambda d: d * 0, collection_metadata={
'hnsw:space': 'l2'})
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
-0.0), (Document(page_content='bar', metadata={'page': '1'}), -0.0),
(Document(page_content='baz', metadata={'page': '2'}), -0.0)] | Test searching with relevance score and custom normalization function. |
create_prompt | additional_prompts = []
if num_plates:
num_plates_str = (f'are {num_plates} plates' if num_plates > 1 else
'is 1 plate')
additional_prompts.append(NUM_PLATES_PROMPT.format(num_plates_str=
num_plates_str))
if num_rows:
additional_prompts.append(ROWS_PROMPT.format(num_rows=num_rows))
if num_cols:
additional_prompts.append(COLS_PROMPT.format(num_cols=num_cols))
return '\n'.join(additional_prompts
) if additional_prompts else GENERIC_PLATES_PROMPT | def create_prompt(num_plates: Optional[int]=None, num_rows: Optional[int]=
None, num_cols: Optional[int]=None) ->str:
additional_prompts = []
if num_plates:
num_plates_str = (f'are {num_plates} plates' if num_plates > 1 else
'is 1 plate')
additional_prompts.append(NUM_PLATES_PROMPT.format(num_plates_str=
num_plates_str))
if num_rows:
additional_prompts.append(ROWS_PROMPT.format(num_rows=num_rows))
if num_cols:
additional_prompts.append(COLS_PROMPT.format(num_cols=num_cols))
return '\n'.join(additional_prompts
) if additional_prompts else GENERIC_PLATES_PROMPT | null |
test_partial_functions_json_output_parser_diff | def input_iter(_: Any) ->Iterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(content='', additional_kwargs={'function_call':
{'arguments': token}})
chain = input_iter | JsonOutputFunctionsParser(diff=True)
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF | def test_partial_functions_json_output_parser_diff() ->None:
def input_iter(_: Any) ->Iterator[AIMessageChunk]:
for token in STREAMED_TOKENS:
yield AIMessageChunk(content='', additional_kwargs={
'function_call': {'arguments': token}})
chain = input_iter | JsonOutputFunctionsParser(diff=True)
assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON_DIFF | null |
_call | rellm = import_rellm()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
text = rellm.complete_re(prompt, self.regex, tokenizer=pipeline.tokenizer,
model=pipeline.model, max_new_tokens=self.max_new_tokens)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
rellm = import_rellm()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
text = rellm.complete_re(prompt, self.regex, tokenizer=pipeline.
tokenizer, model=pipeline.model, max_new_tokens=self.max_new_tokens)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | null |
from_default | """Load with default LLMChain."""
llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)
return cls.from_llm(llm, objective, **kwargs) | @classmethod
def from_default(cls, objective: str, **kwargs: Any) ->NatBotChain:
"""Load with default LLMChain."""
llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)
return cls.from_llm(llm, objective, **kwargs) | Load with default LLMChain. |
on_text | """
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_text', 'text': text})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### On Text'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]
)) + '\n') | def on_text(self, text: str, **kwargs: Any) ->None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_text', 'text': text})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### On Text'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
resp])) + '\n') | Run when agent is ending. |
similarity_search_with_score | """Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each.
"""
_query = self._embedding.embed_query(query)
docs = self.similarity_search_by_vector_with_scores(embedding=_query, k=k,
filter=filter, kwargs=kwargs)
return docs | def similarity_search_with_score(self, query: str, k: int=4, filter:
Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Tuple[Document, float]
]:
"""Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each.
"""
_query = self._embedding.embed_query(query)
docs = self.similarity_search_by_vector_with_scores(embedding=_query, k
=k, filter=filter, kwargs=kwargs)
return docs | Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each. |
test_konko_streaming_model_name_test | """Check model info during streaming."""
chat_instance = ChatKonko(max_tokens=10, streaming=True)
msg = HumanMessage(content='Hi')
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output['model_name'] == chat_instance.model | def test_konko_streaming_model_name_test() ->None:
"""Check model info during streaming."""
chat_instance = ChatKonko(max_tokens=10, streaming=True)
msg = HumanMessage(content='Hi')
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output['model_name'] == chat_instance.model | Check model info during streaming. |
delete | """
Delete a Redis entry.
Args:
ids: List of ids (keys in redis) to delete.
redis_url: Redis connection url. This should be passed in the kwargs
or set as an environment variable: REDIS_URL.
Returns:
bool: Whether or not the deletions were successful.
Raises:
ValueError: If the redis python package is not installed.
ValueError: If the ids (keys in redis) are not provided
"""
redis_url = get_from_dict_or_env(kwargs, 'redis_url', 'REDIS_URL')
if ids is None:
raise ValueError("'ids' (keys)() were not provided.")
try:
import redis
except ImportError:
raise ValueError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
if 'redis_url' in kwargs:
kwargs.pop('redis_url')
client = get_client(redis_url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f'Your redis connected error: {e}')
try:
client.delete(*ids)
logger.info('Entries deleted')
return True
except:
return False | @staticmethod
def delete(ids: Optional[List[str]]=None, **kwargs: Any) ->bool:
"""
Delete a Redis entry.
Args:
ids: List of ids (keys in redis) to delete.
redis_url: Redis connection url. This should be passed in the kwargs
or set as an environment variable: REDIS_URL.
Returns:
bool: Whether or not the deletions were successful.
Raises:
ValueError: If the redis python package is not installed.
ValueError: If the ids (keys in redis) are not provided
"""
redis_url = get_from_dict_or_env(kwargs, 'redis_url', 'REDIS_URL')
if ids is None:
raise ValueError("'ids' (keys)() were not provided.")
try:
import redis
except ImportError:
raise ValueError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
if 'redis_url' in kwargs:
kwargs.pop('redis_url')
client = get_client(redis_url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f'Redis connection error: {e}')
try:
client.delete(*ids)
logger.info('Entries deleted')
return True
except Exception:
return False | Delete a Redis entry.
Args:
ids: List of ids (keys in redis) to delete.
redis_url: Redis connection url. This should be passed in the kwargs
or set as an environment variable: REDIS_URL.
Returns:
bool: Whether or not the deletions were successful.
Raises:
ValueError: If the redis python package is not installed.
ValueError: If the ids (keys in redis) are not provided |
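A short sketch of calling the static delete above; the key names are hypothetical, and a reachable Redis server plus the redis package are assumed.
from langchain_community.vectorstores.redis import Redis

# Keys follow whatever schema your index used when the documents were added.
deleted = Redis.delete(
    ids=["doc:test_index:0", "doc:test_index:1"],
    redis_url="redis://localhost:6379",
)
print("deleted" if deleted else "delete failed")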
test_table_info_w_sample_rows | """Test that table info is constructed properly."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
values = [{'user_id': 13, 'user_name': 'Harrison', 'user_bio': 'bio'}, {
'user_id': 14, 'user_name': 'Chase', 'user_bio': 'bio'}]
stmt = insert(user).values(values)
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine, sample_rows_in_table_info=2)
output = db.table_info
expected_output = """
CREATE TABLE company (
company_id INTEGER NOT NULL,
company_location VARCHAR NOT NULL,
PRIMARY KEY (company_id)
)
/*
2 rows from company table:
company_id company_location
*/
CREATE TABLE user (
user_id INTEGER NOT NULL,
user_name VARCHAR(16) NOT NULL,
user_bio TEXT,
PRIMARY KEY (user_id)
)
/*
2 rows from user table:
user_id user_name user_bio
13 Harrison bio
14 Chase bio
*/
"""
assert sorted(output.split()) == sorted(expected_output.split()) | def test_table_info_w_sample_rows() ->None:
"""Test that table info is constructed properly."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
values = [{'user_id': 13, 'user_name': 'Harrison', 'user_bio': 'bio'},
{'user_id': 14, 'user_name': 'Chase', 'user_bio': 'bio'}]
stmt = insert(user).values(values)
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine, sample_rows_in_table_info=2)
output = db.table_info
expected_output = """
CREATE TABLE company (
company_id INTEGER NOT NULL,
company_location VARCHAR NOT NULL,
PRIMARY KEY (company_id)
)
/*
2 rows from company table:
company_id company_location
*/
CREATE TABLE user (
user_id INTEGER NOT NULL,
user_name VARCHAR(16) NOT NULL,
user_bio TEXT,
PRIMARY KEY (user_id)
)
/*
2 rows from user table:
user_id user_name user_bio
13 Harrison bio
14 Chase bio
*/
"""
assert sorted(output.split()) == sorted(expected_output.split()) | Test that table info is constructed properly. |
test_all_imports | assert set(__all__) == set(EXPECTED_ALL) | def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL) | null |
is_lc_serializable | """Return whether this model can be serialized by Langchain."""
return True | @classmethod
def is_lc_serializable(cls) ->bool:
"""Return whether this model can be serialized by Langchain."""
return True | Return whether this model can be serialized by Langchain. |
_embed_query | if query is None or query == '' or query.isspace():
return None
else:
return self.embedding.embed_query(query) | def _embed_query(self, query: str) ->Optional[List[float]]:
if query is None or query == '' or query.isspace():
return None
else:
return self.embedding.embed_query(query) | null |
_prep_docs | embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts))
docs: List[TigrisDocument] = []
for t, m, e, _id in itertools.zip_longest(texts, metadatas or [],
embeddings or [], ids or []):
doc: TigrisDocument = {'text': t, 'embeddings': e or [], 'metadata': m or
{}}
if _id:
doc['id'] = _id
docs.append(doc)
return docs | def _prep_docs(self, texts: Iterable[str], metadatas: Optional[List[dict]],
ids: Optional[List[str]]) ->List[TigrisDocument]:
embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts))
docs: List[TigrisDocument] = []
for t, m, e, _id in itertools.zip_longest(texts, metadatas or [],
embeddings or [], ids or []):
doc: TigrisDocument = {'text': t, 'embeddings': e or [], 'metadata':
m or {}}
if _id:
doc['id'] = _id
docs.append(doc)
return docs | null |
plan | """Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
return self.output_parser.parse(full_output) | def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks:
Callbacks=None, **kwargs: Any) ->Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
return self.output_parser.parse(full_output) | Given input, decide what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use. |
_import_openllm | from langchain_community.llms.openllm import OpenLLM
return OpenLLM | def _import_openllm() ->Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM | null |
get_client_info | """Returns a custom user agent header.
Args:
module (Optional[str]):
Optional. The module for a custom user agent header.
Returns:
google.api_core.gapic_v1.client_info.ClientInfo
"""
try:
from google.api_core.gapic_v1.client_info import ClientInfo
except ImportError as exc:
raise ImportError(
'Could not import ClientInfo. Please, install it with pip install google-api-core'
) from exc
langchain_version = metadata.version('langchain')
client_library_version = (f'{langchain_version}-{module}' if module else
langchain_version)
return ClientInfo(client_library_version=client_library_version, user_agent
=f'langchain/{client_library_version}') | def get_client_info(module: Optional[str]=None) ->'ClientInfo':
"""Returns a custom user agent header.
Args:
module (Optional[str]):
Optional. The module for a custom user agent header.
Returns:
google.api_core.gapic_v1.client_info.ClientInfo
"""
try:
from google.api_core.gapic_v1.client_info import ClientInfo
except ImportError as exc:
raise ImportError(
'Could not import ClientInfo. Please, install it with pip install google-api-core'
) from exc
langchain_version = metadata.version('langchain')
client_library_version = (f'{langchain_version}-{module}' if module else
langchain_version)
return ClientInfo(client_library_version=client_library_version,
user_agent=f'langchain/{client_library_version}') | Returns a custom user agent header.
Args:
module (Optional[str]):
Optional. The module for a custom user agent header.
Returns:
google.api_core.gapic_v1.client_info.ClientInfo |
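A usage sketch for the helper above; the import path is an assumption, as several Google integrations expose a similar function.
from langchain_community.utilities.vertexai import get_client_info  # assumed path

info = get_client_info(module="vertexai")
print(info.user_agent)               # e.g. "langchain/0.1.0-vertexai"
print(info.client_library_version)   # e.g. "0.1.0-vertexai"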
_llm_type | """Return type of LLM."""
return 'nvidia-trt-llm' | @property
def _llm_type(self) ->str:
"""Return type of LLM."""
return 'nvidia-trt-llm' | Return type of LLM. |
stream | if type(self)._stream == BaseChatModel._stream:
yield cast(BaseMessageChunk, self.invoke(input, config=config, stop=
stop, **kwargs))
else:
config = ensure_config(config)
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {'stop': stop, **kwargs}
callback_manager = CallbackManager.configure(config.get('callbacks'),
self.callbacks, self.verbose, config.get('tags'), self.tags, config
.get('metadata'), self.metadata)
run_manager, = callback_manager.on_chat_model_start(dumpd(self), [
messages], invocation_params=params, options=options, name=config.
get('run_name'), batch_size=1)
generation: Optional[ChatGenerationChunk] = None
try:
for chunk in self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs):
yield chunk.message
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
except BaseException as e:
run_manager.on_llm_error(e, response=LLMResult(generations=[[
generation]] if generation else []))
raise e
else:
run_manager.on_llm_end(LLMResult(generations=[[generation]])) | def stream(self, input: LanguageModelInput, config: Optional[RunnableConfig
]=None, *, stop: Optional[List[str]]=None, **kwargs: Any) ->Iterator[
BaseMessageChunk]:
if type(self)._stream == BaseChatModel._stream:
yield cast(BaseMessageChunk, self.invoke(input, config=config, stop
=stop, **kwargs))
else:
config = ensure_config(config)
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {'stop': stop, **kwargs}
callback_manager = CallbackManager.configure(config.get('callbacks'
), self.callbacks, self.verbose, config.get('tags'), self.tags,
config.get('metadata'), self.metadata)
run_manager, = callback_manager.on_chat_model_start(dumpd(self), [
messages], invocation_params=params, options=options, name=
config.get('run_name'), batch_size=1)
generation: Optional[ChatGenerationChunk] = None
try:
for chunk in self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs):
yield chunk.message
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
except BaseException as e:
run_manager.on_llm_error(e, response=LLMResult(generations=[[
generation]] if generation else []))
raise e
else:
run_manager.on_llm_end(LLMResult(generations=[[generation]])) | null |
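Any chat model built on this base class streams the same way; a sketch with ChatOpenAI, assuming an OPENAI_API_KEY in the environment:
from langchain_community.chat_models import ChatOpenAI

chat = ChatOpenAI()
# Each chunk is a BaseMessageChunk; tokens are printed as they arrive.
for chunk in chat.stream("Write a haiku about Redis."):
    print(chunk.content, end="", flush=True)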
get_access_code_url | """Get the URL to get an access code."""
url = f'https://app.clickup.com/api?client_id={oauth_client_id}'
return f'{url}&redirect_uri={redirect_uri}' | @classmethod
def get_access_code_url(cls, oauth_client_id: str, redirect_uri: str=
'https://google.com') ->str:
"""Get the URL to get an access code."""
url = f'https://app.clickup.com/api?client_id={oauth_client_id}'
return f'{url}&redirect_uri={redirect_uri}' | Get the URL to get an access code. |
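A sketch of building the OAuth URL with the classmethod above; the client id and redirect URI are placeholders, and the wrapper class name is taken from the ClickUp integration.
from langchain_community.utilities.clickup import ClickupAPIWrapper

url = ClickupAPIWrapper.get_access_code_url(
    oauth_client_id="YOUR_CLICKUP_CLIENT_ID",
    redirect_uri="https://example.com/callback",
)
print(url)  # open this URL in a browser to obtain the access code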
test_jinachat_generate | """Test JinaChat wrapper with generate."""
chat = JinaChat(max_tokens=10)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | def test_jinachat_generate() ->None:
"""Test JinaChat wrapper with generate."""
chat = JinaChat(max_tokens=10)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content | Test JinaChat wrapper with generate. |
__hash__ | return hash((self.id, tuple(self.options.keys()), self.default)) | def __hash__(self) ->int:
return hash((self.id, tuple(self.options.keys()), self.default)) | null |
get_user_agent | from langchain_community import __version__
return f'langchain/{__version__}' | @staticmethod
def get_user_agent() ->str:
from langchain_community import __version__
return f'langchain/{__version__}' | null |
from_llm | """Get the response parser."""
task_creation_template = (
'You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}. The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {incomplete_tasks}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array.'
)
prompt = PromptTemplate(template=task_creation_template, input_variables=[
'result', 'task_description', 'incomplete_tasks', 'objective'])
return cls(prompt=prompt, llm=llm, verbose=verbose) | @classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool=True) ->LLMChain:
"""Get the response parser."""
task_creation_template = (
'You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}. The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {incomplete_tasks}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array.'
)
prompt = PromptTemplate(template=task_creation_template,
input_variables=['result', 'task_description', 'incomplete_tasks',
'objective'])
return cls(prompt=prompt, llm=llm, verbose=verbose) | Get the response parser. |
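A hedged sketch of the chain built by from_llm above; the class and import path follow the BabyAGI example in langchain_experimental, and the task texts are invented.
from langchain_community.llms import OpenAI
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
    TaskCreationChain,  # assumed location
)

chain = TaskCreationChain.from_llm(OpenAI(temperature=0))
new_tasks = chain.run(
    result="Collected 10 papers on vector databases",
    task_description="Search for recent papers",
    incomplete_tasks="summarize findings",
    objective="Write a literature review",
)
print(new_tasks)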
_get_connection | try:
import singlestoredb as s2
except ImportError:
raise ImportError(
'Could not import singlestoredb python package. Please install it with `pip install singlestoredb`.'
)
return s2.connect(**self.connection_kwargs) | def _get_connection(self) ->Any:
try:
import singlestoredb as s2
except ImportError:
raise ImportError(
'Could not import singlestoredb python package. Please install it with `pip install singlestoredb`.'
)
return s2.connect(**self.connection_kwargs) | null |
_prepare_output | """Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed | def _prepare_output(self, result: dict) ->dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed | Prepare the output. |
test_each_simple | """Test that each() works with a simple runnable."""
parser = FakeSplitIntoListParser()
assert parser.invoke('first item, second item') == ['first item', 'second item'
]
assert parser.map().invoke(['a, b', 'c']) == [['a', 'b'], ['c']]
assert parser.map().map().invoke([['a, b', 'c'], ['c, e']]) == [[['a', 'b'],
['c']], [['c', 'e']]] | def test_each_simple() ->None:
"""Test that each() works with a simple runnable."""
parser = FakeSplitIntoListParser()
assert parser.invoke('first item, second item') == ['first item',
'second item']
assert parser.map().invoke(['a, b', 'c']) == [['a', 'b'], ['c']]
assert parser.map().map().invoke([['a, b', 'c'], ['c, e']]) == [[['a',
'b'], ['c']], [['c', 'e']]] | Test that each() works with a simple runnable. |
submit | """Submit a function to the executor.
Args:
func (Callable[..., T]): The function to submit.
*args (Any): The positional arguments to the function.
**kwargs (Any): The keyword arguments to the function.
Returns:
Future[T]: The future for the function.
"""
return super().submit(cast(Callable[..., T], partial(copy_context().run,
func, *args, **kwargs))) | def submit(self, func: Callable[P, T], *args: P.args, **kwargs: P.kwargs
) ->Future[T]:
"""Submit a function to the executor.
Args:
func (Callable[..., T]): The function to submit.
*args (Any): The positional arguments to the function.
**kwargs (Any): The keyword arguments to the function.
Returns:
Future[T]: The future for the function.
"""
return super().submit(cast(Callable[..., T], partial(copy_context().run,
func, *args, **kwargs))) | Submit a function to the executor.
Args:
func (Callable[..., T]): The function to submit.
*args (Any): The positional arguments to the function.
**kwargs (Any): The keyword arguments to the function.
Returns:
Future[T]: The future for the function. |
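The submit above wraps the callable with copy_context().run so context variables set in the caller are visible inside the worker thread; a standalone sketch of the same pattern with only the standard library:
from concurrent.futures import ThreadPoolExecutor
from contextvars import ContextVar, copy_context
from functools import partial

request_id: ContextVar[str] = ContextVar("request_id", default="-")

def handler() -> str:
    return request_id.get()

request_id.set("abc-123")
with ThreadPoolExecutor() as pool:
    plain = pool.submit(handler).result()                                  # "-": worker thread starts with a fresh context
    wrapped = pool.submit(partial(copy_context().run, handler)).result()   # "abc-123": caller's context is copied in
print(plain, wrapped)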
clean_pdf | """Clean the PDF file.
Args:
contents: a PDF file contents.
Returns:
"""
contents = '\n'.join([line for line in contents.split('\n') if not line.
startswith('![]')])
contents = contents.replace('\\section{', '# ').replace('}', '')
contents = contents.replace('\\$', '$').replace('\\%', '%').replace('\\(', '('
).replace('\\)', ')')
return contents | def clean_pdf(self, contents: str) ->str:
"""Clean the PDF file.
Args:
contents: a PDF file contents.
Returns:
"""
contents = '\n'.join([line for line in contents.split('\n') if not line
.startswith('![]')])
contents = contents.replace('\\section{', '# ').replace('}', '')
contents = contents.replace('\\$', '$').replace('\\%', '%').replace('\\(',
'(').replace('\\)', ')')
return contents | Clean the PDF file.
Args:
contents: a PDF file contents.
Returns: |
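The same cleanup applied to a made-up Mathpix-style string, to show what each replace does:
raw = "\n".join([
    "![](images/figure-1.png)",
    "\\section{Introduction}",
    "Accuracy improved by 12\\% at a cost of \\$5 \\(per run\\).",
])
# Drop inline image lines, turn \section{...} into a markdown heading,
# and unescape the remaining LaTeX punctuation.
cleaned = "\n".join(line for line in raw.split("\n") if not line.startswith("![]"))
cleaned = cleaned.replace("\\section{", "# ").replace("}", "")
cleaned = cleaned.replace("\\$", "$").replace("\\%", "%").replace("\\(", "(").replace("\\)", ")")
print(cleaned)
# # Introduction
# Accuracy improved by 12% at a cost of $5 (per run).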
_get_google_jobs | return GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper(**kwargs)) | def _get_google_jobs(**kwargs: Any) ->BaseTool:
return GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper(**kwargs)) | null |
get_tools | """Get the tools in the toolkit."""
return self.tools | def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools | Get the tools in the toolkit. |
is_lc_serializable | return False | @classmethod
def is_lc_serializable(cls) ->bool:
return False | null |
__init__ | super().__init__()
try:
from trubrics import Trubrics
except ImportError:
raise ImportError(
'The TrubricsCallbackHandler requires installation of the trubrics package. Please install it with `pip install trubrics`.'
)
self.trubrics = Trubrics(project=project, email=email or os.environ[
'TRUBRICS_EMAIL'], password=password or os.environ['TRUBRICS_PASSWORD'])
self.config_model: dict = {}
self.prompt: Optional[str] = None
self.messages: Optional[list] = None
self.trubrics_kwargs: Optional[dict] = kwargs if kwargs else None | def __init__(self, project: str='default', email: Optional[str]=None,
password: Optional[str]=None, **kwargs: Any) ->None:
super().__init__()
try:
from trubrics import Trubrics
except ImportError:
raise ImportError(
'The TrubricsCallbackHandler requires installation of the trubrics package. Please install it with `pip install trubrics`.'
)
self.trubrics = Trubrics(project=project, email=email or os.environ[
'TRUBRICS_EMAIL'], password=password or os.environ['TRUBRICS_PASSWORD']
)
self.config_model: dict = {}
self.prompt: Optional[str] = None
self.messages: Optional[list] = None
self.trubrics_kwargs: Optional[dict] = kwargs if kwargs else None | null |
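A hedged sketch of wiring the handler above into an LLM call; the import path and credentials are assumptions, and the TRUBRICS_EMAIL / TRUBRICS_PASSWORD environment variables can replace the explicit arguments.
from langchain_community.callbacks import TrubricsCallbackHandler  # assumed export
from langchain_community.llms import OpenAI

handler = TrubricsCallbackHandler(
    project="default",
    email="user@example.com",          # placeholder credentials
    password="not-a-real-password",
)
llm = OpenAI(callbacks=[handler])
llm.invoke("What is a callback handler?")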
get_arangodb_client | """Get the Arango DB client from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
try:
from arango import ArangoClient
except ImportError as e:
raise ImportError(
'Unable to import arango, please install with `pip install python-arango`.'
) from e
_url: str = url or os.environ.get('ARANGODB_URL', 'http://localhost:8529')
_dbname: str = dbname or os.environ.get('ARANGODB_DBNAME', '_system')
_username: str = username or os.environ.get('ARANGODB_USERNAME', 'root')
_password: str = password or os.environ.get('ARANGODB_PASSWORD', '')
return ArangoClient(_url).db(_dbname, _username, _password, verify=True) | def get_arangodb_client(url: Optional[str]=None, dbname: Optional[str]=None,
username: Optional[str]=None, password: Optional[str]=None) ->Any:
"""Get the Arango DB client from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase.
"""
try:
from arango import ArangoClient
except ImportError as e:
raise ImportError(
'Unable to import arango, please install with `pip install python-arango`.'
) from e
_url: str = url or os.environ.get('ARANGODB_URL', 'http://localhost:8529')
_dbname: str = dbname or os.environ.get('ARANGODB_DBNAME', '_system')
_username: str = username or os.environ.get('ARANGODB_USERNAME', 'root')
_password: str = password or os.environ.get('ARANGODB_PASSWORD', '')
return ArangoClient(_url).db(_dbname, _username, _password, verify=True) | Get the Arango DB client from credentials.
Args:
url: Arango DB url. Can be passed in as named arg or set as environment
var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
dbname: Arango DB name. Can be passed in as named arg or set as
environment var ``ARANGODB_DBNAME``. Defaults to "_system".
username: Can be passed in as named arg or set as environment var
``ARANGODB_USERNAME``. Defaults to "root".
password: Can be passed in as named arg or set as environment var
``ARANGODB_PASSWORD``. Defaults to "".
Returns:
An arango.database.StandardDatabase. |
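A usage sketch for the client helper above against a local ArangoDB; the connection details are the documented defaults.
from langchain_community.graphs.arangodb_graph import get_arangodb_client

db = get_arangodb_client(
    url="http://localhost:8529",
    dbname="_system",
    username="root",
    password="",
)
print(db.collections())  # list the collections visible to this user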
test_complex_question | """Test complex question that should need python."""
question = 'What is the square root of 2?'
output = fake_llm_math_chain.run(question)
assert output == f'Answer: {2 ** 0.5}' | @pytest.mark.requires('numexpr')
def test_complex_question(fake_llm_math_chain: LLMMathChain) ->None:
"""Test complex question that should need python."""
question = 'What is the square root of 2?'
output = fake_llm_math_chain.run(question)
assert output == f'Answer: {2 ** 0.5}' | Test complex question that should need python. |
_get_run_type | if isinstance(run.run_type, str):
return run.run_type
elif hasattr(run.run_type, 'value'):
return run.run_type.value
else:
return str(run.run_type) | def _get_run_type(run: 'Run') ->str:
if isinstance(run.run_type, str):
return run.run_type
elif hasattr(run.run_type, 'value'):
return run.run_type.value
else:
return str(run.run_type) | null |
pending | return self.status == 'pending' | def pending(self) ->bool:
return self.status == 'pending' | null |
test_api_key_is_secret_string | llm = ChatJavelinAIGateway(gateway_uri='<javelin-ai-gateway-uri>', route=
'<javelin-ai-gateway-chat-route>', javelin_api_key='secret-api-key',
params={'temperature': 0.1})
assert isinstance(llm.javelin_api_key, SecretStr)
assert llm.javelin_api_key.get_secret_value() == 'secret-api-key' | @pytest.mark.requires('javelin_sdk')
def test_api_key_is_secret_string() ->None:
llm = ChatJavelinAIGateway(gateway_uri='<javelin-ai-gateway-uri>',
route='<javelin-ai-gateway-chat-route>', javelin_api_key=
'secret-api-key', params={'temperature': 0.1})
assert isinstance(llm.javelin_api_key, SecretStr)
assert llm.javelin_api_key.get_secret_value() == 'secret-api-key' | null |
add_documents | doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [Document(page_content=s, metadata={id_key: doc_ids[i]}) for
i, s in enumerate(doc_summaries)]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents))) | def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [Document(page_content=s, metadata={id_key: doc_ids[i]}) for
i, s in enumerate(doc_summaries)]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents))) | null |
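A sketch of calling the helper above with a MultiVectorRetriever; the helper relies on a surrounding id_key variable and a uuid import, and the summaries, contents, and embedding backend here are made up.
import uuid  # required by the add_documents helper above

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

id_key = "doc_id"
retriever = MultiVectorRetriever(
    vectorstore=Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()),
    docstore=InMemoryStore(),
    id_key=id_key,
)
add_documents(
    retriever,
    doc_summaries=["A one-line summary of the quarterly report."],
    doc_contents=[Document(page_content="The full quarterly report text...")],
)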
test_replicate_input | llm = Replicate(model=TEST_MODEL, input={'max_length': 10})
assert llm.model_kwargs == {'max_length': 10} | def test_replicate_input() ->None:
llm = Replicate(model=TEST_MODEL, input={'max_length': 10})
assert llm.model_kwargs == {'max_length': 10} | null |
_get_dimension_values | """Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
These values can be used to achieve a more accurate filtering.
"""
logger.info(f'Loading dimension values for: {dimension_name}...')
headers = {'Content-Type': 'application/json', 'Authorization': self.
cube_api_token}
query = {'query': {'dimensions': [dimension_name], 'limit': self.
dimension_values_limit}}
retries = 0
while retries < self.dimension_values_max_retries:
response = requests.request('POST', f'{self.cube_api_url}/load',
headers=headers, data=json.dumps(query))
if response.status_code == 200:
response_data = response.json()
if 'error' in response_data and response_data['error'
] == 'Continue wait':
logger.info('Retrying...')
retries += 1
time.sleep(self.dimension_values_retry_delay)
continue
else:
dimension_values = [item[dimension_name] for item in
response_data['data']]
return dimension_values
else:
logger.error(f'Request failed with status code: {response.status_code}')
break
if retries == self.dimension_values_max_retries:
logger.info('Maximum retries reached.')
return [] | def _get_dimension_values(self, dimension_name: str) ->List[str]:
"""Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
These values can be used to achieve a more accurate filtering.
"""
logger.info(f'Loading dimension values for: {dimension_name}...')
headers = {'Content-Type': 'application/json', 'Authorization': self.
cube_api_token}
query = {'query': {'dimensions': [dimension_name], 'limit': self.
dimension_values_limit}}
retries = 0
while retries < self.dimension_values_max_retries:
response = requests.request('POST', f'{self.cube_api_url}/load',
headers=headers, data=json.dumps(query))
if response.status_code == 200:
response_data = response.json()
if 'error' in response_data and response_data['error'
] == 'Continue wait':
logger.info('Retrying...')
retries += 1
time.sleep(self.dimension_values_retry_delay)
continue
else:
dimension_values = [item[dimension_name] for item in
response_data['data']]
return dimension_values
else:
logger.error(f'Request failed with status code: {response.status_code}')
break
if retries == self.dimension_values_max_retries:
logger.info('Maximum retries reached.')
return [] | Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
These values can be used to achieve a more accurate filtering. |
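The same bounded-retry pattern as a standalone helper; the endpoint, payload, and token are placeholders, and Cube answers "Continue wait" while a query is still being computed.
import json
import time

import requests

def post_with_retries(url: str, payload: dict, token: str, max_retries: int = 10, delay: float = 3.0) -> dict:
    headers = {"Content-Type": "application/json", "Authorization": token}
    for _ in range(max_retries):
        response = requests.post(url, headers=headers, data=json.dumps(payload))
        if response.status_code != 200:
            break
        data = response.json()
        if data.get("error") == "Continue wait":
            time.sleep(delay)  # query still being computed; retry after a pause
            continue
        return data
    return {}

# result = post_with_retries("https://example.cube.dev/cubejs-api/v1/load",
#                            {"query": {"dimensions": ["Orders.status"], "limit": 100}},
#                            token="CUBE_API_TOKEN")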
input_mapper | return [HumanMessage(content=d['some_input'])] | def input_mapper(d: dict) ->List[BaseMessage]:
return [HumanMessage(content=d['some_input'])] | null |
_import_portkey | from langchain_community.utilities.portkey import Portkey
return Portkey | def _import_portkey() ->Any:
from langchain_community.utilities.portkey import Portkey
return Portkey | null |
_stream | generation_config = kwargs.get('generation_config', {})
if stop:
generation_config['stop_sequences'] = stop
for stream_resp in _completion_with_retry(self, prompt, stream=True,
is_gemini=True, run_manager=run_manager, generation_config=
generation_config, **kwargs):
chunk = GenerationChunk(text=stream_resp.text)
yield chunk
if run_manager:
run_manager.on_llm_new_token(stream_resp.text, chunk=chunk, verbose
=self.verbose) | def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
generation_config = kwargs.get('generation_config', {})
if stop:
generation_config['stop_sequences'] = stop
for stream_resp in _completion_with_retry(self, prompt, stream=True,
is_gemini=True, run_manager=run_manager, generation_config=
generation_config, **kwargs):
chunk = GenerationChunk(text=stream_resp.text)
yield chunk
if run_manager:
run_manager.on_llm_new_token(stream_resp.text, chunk=chunk,
verbose=self.verbose) | null |
_default_params | """Get the default parameters for calling Ollama."""
return {'model': self.model, 'format': self.format, 'options': {'mirostat':
self.mirostat, 'mirostat_eta': self.mirostat_eta, 'mirostat_tau': self.
mirostat_tau, 'num_ctx': self.num_ctx, 'num_gpu': self.num_gpu,
'num_thread': self.num_thread, 'repeat_last_n': self.repeat_last_n,
'repeat_penalty': self.repeat_penalty, 'temperature': self.temperature,
'stop': self.stop, 'tfs_z': self.tfs_z, 'top_k': self.top_k, 'top_p':
self.top_p}, 'system': self.system, 'template': self.template} | @property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {'model': self.model, 'format': self.format, 'options': {
'mirostat': self.mirostat, 'mirostat_eta': self.mirostat_eta,
'mirostat_tau': self.mirostat_tau, 'num_ctx': self.num_ctx,
'num_gpu': self.num_gpu, 'num_thread': self.num_thread,
'repeat_last_n': self.repeat_last_n, 'repeat_penalty': self.
repeat_penalty, 'temperature': self.temperature, 'stop': self.stop,
'tfs_z': self.tfs_z, 'top_k': self.top_k, 'top_p': self.top_p},
'system': self.system, 'template': self.template} | Get the default parameters for calling Ollama. |
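Any of the options listed above can be overridden at construction time; a sketch assuming a local Ollama server with the llama2 model pulled:
from langchain_community.llms import Ollama

llm = Ollama(model="llama2", temperature=0.2, num_ctx=4096, top_k=40)
print(llm._default_params["options"]["temperature"])  # 0.2
print(llm.invoke("Why is the sky blue?"))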